pub struct MachineX86_64 {
assembler: AssemblerX64,
used_gprs: u32,
used_simd: u32,
trap_table: TrapTable,
instructions_address_map: Vec<InstructionAddressMap>,
src_loc: u32,
unwind_ops: Vec<(usize, UnwindOps)>,
}
Fields§
§assembler: AssemblerX64
§used_gprs: u32
§used_simd: u32
§trap_table: TrapTable
§instructions_address_map: Vec<InstructionAddressMap>
Map from byte offset into wasm function to range of native instructions.
src_loc: u32
The source location for the current operator.
unwind_ops: Vec<(usize, UnwindOps)>
Vector of unwind operations with offset
Implementations§
source§impl MachineX86_64
impl MachineX86_64
pub fn new(target: Option<Target>) -> Result<Self, CompileError>
pub fn emit_relaxed_binop( &mut self, op: fn(_: &mut AssemblerX64, _: Size, _: Location<GPR, XMM>, _: Location<GPR, XMM>) -> Result<(), CompileError>, sz: Size, src: Location<GPR, XMM>, dst: Location<GPR, XMM>, ) -> Result<(), CompileError>
pub fn emit_relaxed_zx_sx( &mut self, op: fn(_: &mut AssemblerX64, _: Size, _: Location<GPR, XMM>, _: Size, _: Location<GPR, XMM>) -> Result<(), CompileError>, sz_src: Size, src: Location<GPR, XMM>, sz_dst: Size, dst: Location<GPR, XMM>, ) -> Result<(), CompileError>
sourcefn emit_binop_i32(
&mut self,
f: fn(_: &mut AssemblerX64, _: Size, _: Location<GPR, XMM>, _: Location<GPR, XMM>) -> Result<(), CompileError>,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn emit_binop_i32( &mut self, f: fn(_: &mut AssemblerX64, _: Size, _: Location<GPR, XMM>, _: Location<GPR, XMM>) -> Result<(), CompileError>, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
I32 binary operation with both operands popped from the virtual stack.
sourcefn emit_binop_i64(
&mut self,
f: fn(_: &mut AssemblerX64, _: Size, _: Location<GPR, XMM>, _: Location<GPR, XMM>) -> Result<(), CompileError>,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn emit_binop_i64( &mut self, f: fn(_: &mut AssemblerX64, _: Size, _: Location<GPR, XMM>, _: Location<GPR, XMM>) -> Result<(), CompileError>, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
I64 binary operation with both operands popped from the virtual stack.
sourcefn emit_cmpop_i64_dynamic_b(
&mut self,
c: Condition,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn emit_cmpop_i64_dynamic_b( &mut self, c: Condition, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
I64 comparison where the right-hand operand (`loc_b`) is dynamic.
sourcefn emit_shift_i64(
&mut self,
f: fn(_: &mut AssemblerX64, _: Size, _: Location<GPR, XMM>, _: Location<GPR, XMM>) -> Result<(), CompileError>,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn emit_shift_i64( &mut self, f: fn(_: &mut AssemblerX64, _: Size, _: Location<GPR, XMM>, _: Location<GPR, XMM>) -> Result<(), CompileError>, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
I64 shift with both operands popped from the virtual stack.
sourcefn emit_relaxed_xdiv(
&mut self,
op: fn(_: &mut AssemblerX64, _: Size, _: Location<GPR, XMM>) -> Result<(), CompileError>,
sz: Size,
loc: Location<GPR, XMM>,
integer_division_by_zero: DynamicLabel,
) -> Result<usize, CompileError>
fn emit_relaxed_xdiv( &mut self, op: fn(_: &mut AssemblerX64, _: Size, _: Location<GPR, XMM>) -> Result<(), CompileError>, sz: Size, loc: Location<GPR, XMM>, integer_division_by_zero: DynamicLabel, ) -> Result<usize, CompileError>
Moves `loc` to a valid location for `div`/`idiv`.
sourcefn emit_cmpop_i32_dynamic_b(
&mut self,
c: Condition,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn emit_cmpop_i32_dynamic_b( &mut self, c: Condition, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
I32 comparison where the right-hand operand (`loc_b`) is dynamic.
sourcefn emit_shift_i32(
&mut self,
f: fn(_: &mut AssemblerX64, _: Size, _: Location<GPR, XMM>, _: Location<GPR, XMM>) -> Result<(), CompileError>,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn emit_shift_i32( &mut self, f: fn(_: &mut AssemblerX64, _: Size, _: Location<GPR, XMM>, _: Location<GPR, XMM>) -> Result<(), CompileError>, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
I32 shift with both operands popped from the virtual stack.
fn memory_op<F: FnOnce(&mut Self, GPR) -> Result<(), CompileError>>( &mut self, addr: Location<GPR, XMM>, memarg: &MemArg, check_alignment: bool, value_size: usize, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, cb: F, ) -> Result<(), CompileError>
fn emit_compare_and_swap<F: FnOnce(&mut Self, GPR, GPR) -> Result<(), CompileError>>( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, ret: Location<GPR, XMM>, memarg: &MemArg, value_size: usize, memory_sz: Size, stack_sz: Size, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, cb: F, ) -> Result<(), CompileError>
fn emit_f32_int_conv_check( &mut self, reg: XMM, lower_bound: f32, upper_bound: f32, underflow_label: DynamicLabel, overflow_label: DynamicLabel, nan_label: DynamicLabel, succeed_label: DynamicLabel, ) -> Result<(), CompileError>
fn emit_f32_int_conv_check_trap( &mut self, reg: XMM, lower_bound: f32, upper_bound: f32, ) -> Result<(), CompileError>
fn emit_f32_int_conv_check_sat<F1: FnOnce(&mut Self) -> Result<(), CompileError>, F2: FnOnce(&mut Self) -> Result<(), CompileError>, F3: FnOnce(&mut Self) -> Result<(), CompileError>, F4: FnOnce(&mut Self) -> Result<(), CompileError>>( &mut self, reg: XMM, lower_bound: f32, upper_bound: f32, underflow_cb: F1, overflow_cb: F2, nan_cb: Option<F3>, convert_cb: F4, ) -> Result<(), CompileError>
fn emit_f64_int_conv_check( &mut self, reg: XMM, lower_bound: f64, upper_bound: f64, underflow_label: DynamicLabel, overflow_label: DynamicLabel, nan_label: DynamicLabel, succeed_label: DynamicLabel, ) -> Result<(), CompileError>
fn emit_f64_int_conv_check_trap( &mut self, reg: XMM, lower_bound: f64, upper_bound: f64, ) -> Result<(), CompileError>
fn emit_f64_int_conv_check_sat<F1: FnOnce(&mut Self) -> Result<(), CompileError>, F2: FnOnce(&mut Self) -> Result<(), CompileError>, F3: FnOnce(&mut Self) -> Result<(), CompileError>, F4: FnOnce(&mut Self) -> Result<(), CompileError>>( &mut self, reg: XMM, lower_bound: f64, upper_bound: f64, underflow_cb: F1, overflow_cb: F2, nan_cb: Option<F3>, convert_cb: F4, ) -> Result<(), CompileError>
sourcefn emit_relaxed_avx(
&mut self,
op: fn(_: &mut AssemblerX64, _: XMM, _: XMMOrMemory, _: XMM) -> Result<(), CompileError>,
src1: Location<GPR, XMM>,
src2: Location<GPR, XMM>,
dst: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn emit_relaxed_avx( &mut self, op: fn(_: &mut AssemblerX64, _: XMM, _: XMMOrMemory, _: XMM) -> Result<(), CompileError>, src1: Location<GPR, XMM>, src2: Location<GPR, XMM>, dst: Location<GPR, XMM>, ) -> Result<(), CompileError>
Moves `src1` and `src2` to valid locations and possibly adds a layer of indirection for `dst` for AVX instructions.
sourcefn emit_relaxed_avx_base<F: FnOnce(&mut Self, XMM, XMMOrMemory, XMM) -> Result<(), CompileError>>(
&mut self,
op: F,
src1: Location<GPR, XMM>,
src2: Location<GPR, XMM>,
dst: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn emit_relaxed_avx_base<F: FnOnce(&mut Self, XMM, XMMOrMemory, XMM) -> Result<(), CompileError>>( &mut self, op: F, src1: Location<GPR, XMM>, src2: Location<GPR, XMM>, dst: Location<GPR, XMM>, ) -> Result<(), CompileError>
Moves `src1` and `src2` to valid locations and possibly adds a layer of indirection for `dst` for AVX instructions.
fn convert_i64_f64_u_s( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
fn convert_i64_f64_u_u( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
fn convert_i64_f64_s_s( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
fn convert_i64_f64_s_u( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
fn convert_i32_f64_s_s( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
fn convert_i32_f64_s_u( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
fn convert_i32_f64_u_s( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
fn convert_i32_f64_u_u( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
fn convert_i64_f32_u_s( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
fn convert_i64_f32_u_u( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
fn convert_i64_f32_s_s( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
fn convert_i64_f32_s_u( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
fn convert_i32_f32_s_s( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
fn convert_i32_f32_s_u( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
fn convert_i32_f32_u_s( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
fn convert_i32_f32_u_u( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
fn emit_relaxed_atomic_xchg( &mut self, sz: Size, src: Location<GPR, XMM>, dst: Location<GPR, XMM>, ) -> Result<(), CompileError>
fn used_gprs_contains(&self, r: &GPR) -> bool
fn used_simd_contains(&self, r: &XMM) -> bool
fn used_gprs_insert(&mut self, r: GPR)
fn used_simd_insert(&mut self, r: XMM)
fn used_gprs_remove(&mut self, r: &GPR) -> bool
fn used_simd_remove(&mut self, r: &XMM) -> bool
fn emit_unwind_op(&mut self, op: UnwindOps) -> Result<(), CompileError>
fn emit_illegal_op_internal( &mut self, trap: TrapCode, ) -> Result<(), CompileError>
Trait Implementations§
source§impl Machine for MachineX86_64
impl Machine for MachineX86_64
source§fn set_srcloc(&mut self, offset: u32)
fn set_srcloc(&mut self, offset: u32)
Set the source location of the Wasm to the given offset.
source§fn mark_address_range_with_trap_code(
&mut self,
code: TrapCode,
begin: usize,
end: usize,
)
fn mark_address_range_with_trap_code( &mut self, code: TrapCode, begin: usize, end: usize, )
Marks each address in the code range emitted by `f` with the trap code `code`.
source§fn mark_address_with_trap_code(&mut self, code: TrapCode)
fn mark_address_with_trap_code(&mut self, code: TrapCode)
Marks one address as trappable with trap code `code`.
source§fn mark_instruction_with_trap_code(&mut self, code: TrapCode) -> usize
fn mark_instruction_with_trap_code(&mut self, code: TrapCode) -> usize
Marks the instruction as trappable with trap code `code`. Returns the “begin” offset.
source§fn mark_instruction_address_end(&mut self, begin: usize)
fn mark_instruction_address_end(&mut self, begin: usize)
Pushes the instruction to the address map, calculating the offset from a provided beginning address.
source§fn insert_stackoverflow(&mut self)
fn insert_stackoverflow(&mut self)
Insert a StackOverflow (at offset 0)
source§fn collect_trap_information(&self) -> Vec<TrapInformation>
fn collect_trap_information(&self) -> Vec<TrapInformation>
Get all current TrapInformation
type GPR = GPR
type SIMD = XMM
source§fn assembler_get_offset(&self) -> AssemblyOffset
fn assembler_get_offset(&self) -> AssemblyOffset
source§fn index_from_gpr(&self, x: GPR) -> RegisterIndex
fn index_from_gpr(&self, x: GPR) -> RegisterIndex
source§fn index_from_simd(&self, x: XMM) -> RegisterIndex
fn index_from_simd(&self, x: XMM) -> RegisterIndex
source§fn get_vmctx_reg(&self) -> GPR
fn get_vmctx_reg(&self) -> GPR
source§fn get_used_gprs(&self) -> Vec<GPR>
fn get_used_gprs(&self) -> Vec<GPR>
source§fn get_used_simd(&self) -> Vec<XMM>
fn get_used_simd(&self) -> Vec<XMM>
source§fn pick_gpr(&self) -> Option<GPR>
fn pick_gpr(&self) -> Option<GPR>
source§fn pick_temp_gpr(&self) -> Option<GPR>
fn pick_temp_gpr(&self) -> Option<GPR>
source§fn acquire_temp_gpr(&mut self) -> Option<GPR>
fn acquire_temp_gpr(&mut self) -> Option<GPR>
source§fn release_gpr(&mut self, gpr: GPR)
fn release_gpr(&mut self, gpr: GPR)
source§fn reserve_unused_temp_gpr(&mut self, gpr: GPR) -> GPR
fn reserve_unused_temp_gpr(&mut self, gpr: GPR) -> GPR
source§fn reserve_gpr(&mut self, gpr: GPR)
fn reserve_gpr(&mut self, gpr: GPR)
source§fn push_used_gpr(&mut self, used_gprs: &[GPR]) -> Result<usize, CompileError>
fn push_used_gpr(&mut self, used_gprs: &[GPR]) -> Result<usize, CompileError>
source§fn pop_used_gpr(&mut self, used_gprs: &[GPR]) -> Result<(), CompileError>
fn pop_used_gpr(&mut self, used_gprs: &[GPR]) -> Result<(), CompileError>
source§fn pick_temp_simd(&self) -> Option<XMM>
fn pick_temp_simd(&self) -> Option<XMM>
source§fn acquire_temp_simd(&mut self) -> Option<XMM>
fn acquire_temp_simd(&mut self) -> Option<XMM>
source§fn reserve_simd(&mut self, simd: XMM)
fn reserve_simd(&mut self, simd: XMM)
source§fn release_simd(&mut self, simd: XMM)
fn release_simd(&mut self, simd: XMM)
source§fn push_used_simd(&mut self, used_xmms: &[XMM]) -> Result<usize, CompileError>
fn push_used_simd(&mut self, used_xmms: &[XMM]) -> Result<usize, CompileError>
source§fn pop_used_simd(&mut self, used_xmms: &[XMM]) -> Result<(), CompileError>
fn pop_used_simd(&mut self, used_xmms: &[XMM]) -> Result<(), CompileError>
fn instructions_address_map(&self) -> Vec<InstructionAddressMap>
source§fn local_on_stack(&mut self, stack_offset: i32) -> Location<GPR, XMM>
fn local_on_stack(&mut self, stack_offset: i32) -> Location<GPR, XMM>
source§fn round_stack_adjust(&self, value: usize) -> usize
fn round_stack_adjust(&self, value: usize) -> usize
source§fn adjust_stack(&mut self, delta_stack_offset: u32) -> Result<(), CompileError>
fn adjust_stack(&mut self, delta_stack_offset: u32) -> Result<(), CompileError>
source§fn restore_stack(&mut self, delta_stack_offset: u32) -> Result<(), CompileError>
fn restore_stack(&mut self, delta_stack_offset: u32) -> Result<(), CompileError>
source§fn pop_stack_locals(
&mut self,
delta_stack_offset: u32,
) -> Result<(), CompileError>
fn pop_stack_locals( &mut self, delta_stack_offset: u32, ) -> Result<(), CompileError>
source§fn move_location_for_native(
&mut self,
_size: Size,
loc: Location<GPR, XMM>,
dest: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn move_location_for_native( &mut self, _size: Size, loc: Location<GPR, XMM>, dest: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn zero_location(
&mut self,
size: Size,
location: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn zero_location( &mut self, size: Size, location: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn local_pointer(&self) -> GPR
fn local_pointer(&self) -> GPR
source§fn is_local_on_stack(&self, idx: usize) -> bool
fn is_local_on_stack(&self, idx: usize) -> bool
source§fn get_local_location(
&self,
idx: usize,
callee_saved_regs_size: usize,
) -> Location<GPR, XMM>
fn get_local_location( &self, idx: usize, callee_saved_regs_size: usize, ) -> Location<GPR, XMM>
source§fn move_local(
&mut self,
stack_offset: i32,
location: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn move_local( &mut self, stack_offset: i32, location: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn list_to_save(
&self,
calling_convention: CallingConvention,
) -> Vec<Location<GPR, XMM>>
fn list_to_save( &self, calling_convention: CallingConvention, ) -> Vec<Location<GPR, XMM>>
source§fn get_param_location(
&self,
idx: usize,
_sz: Size,
stack_location: &mut usize,
calling_convention: CallingConvention,
) -> Location<GPR, XMM>
fn get_param_location( &self, idx: usize, _sz: Size, stack_location: &mut usize, calling_convention: CallingConvention, ) -> Location<GPR, XMM>
source§fn get_call_param_location(
&self,
idx: usize,
_sz: Size,
_stack_location: &mut usize,
calling_convention: CallingConvention,
) -> Location<GPR, XMM>
fn get_call_param_location( &self, idx: usize, _sz: Size, _stack_location: &mut usize, calling_convention: CallingConvention, ) -> Location<GPR, XMM>
source§fn get_simple_param_location(
&self,
idx: usize,
calling_convention: CallingConvention,
) -> Location<GPR, XMM>
fn get_simple_param_location( &self, idx: usize, calling_convention: CallingConvention, ) -> Location<GPR, XMM>
source§fn move_location(
&mut self,
size: Size,
source: Location<GPR, XMM>,
dest: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn move_location( &mut self, size: Size, source: Location<GPR, XMM>, dest: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn move_location_extend(
&mut self,
size_val: Size,
signed: bool,
source: Location<GPR, XMM>,
size_op: Size,
dest: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn move_location_extend( &mut self, size_val: Size, signed: bool, source: Location<GPR, XMM>, size_op: Size, dest: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn load_address(
&mut self,
size: Size,
reg: Location<GPR, XMM>,
mem: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn load_address( &mut self, size: Size, reg: Location<GPR, XMM>, mem: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn init_stack_loc(
&mut self,
init_stack_loc_cnt: u64,
last_stack_loc: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn init_stack_loc( &mut self, init_stack_loc_cnt: u64, last_stack_loc: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn restore_saved_area(
&mut self,
saved_area_offset: i32,
) -> Result<(), CompileError>
fn restore_saved_area( &mut self, saved_area_offset: i32, ) -> Result<(), CompileError>
source§fn pop_location(
&mut self,
location: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn pop_location( &mut self, location: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn new_machine_state(&self) -> MachineState
fn new_machine_state(&self) -> MachineState
Creates a `MachineState` with default values.
source§fn get_offset(&self) -> AssemblyOffset
fn get_offset(&self) -> AssemblyOffset
source§fn finalize_function(&mut self) -> Result<(), CompileError>
fn finalize_function(&mut self) -> Result<(), CompileError>
source§fn emit_function_prolog(&mut self) -> Result<(), CompileError>
fn emit_function_prolog(&mut self) -> Result<(), CompileError>
source§fn emit_function_epilog(&mut self) -> Result<(), CompileError>
fn emit_function_epilog(&mut self) -> Result<(), CompileError>
source§fn emit_function_return_value(
&mut self,
ty: WpType,
canonicalize: bool,
loc: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn emit_function_return_value( &mut self, ty: WpType, canonicalize: bool, loc: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn emit_function_return_float(&mut self) -> Result<(), CompileError>
fn emit_function_return_float(&mut self) -> Result<(), CompileError>
source§fn arch_supports_canonicalize_nan(&self) -> bool
fn arch_supports_canonicalize_nan(&self) -> bool
source§fn canonicalize_nan(
&mut self,
sz: Size,
input: Location<GPR, XMM>,
output: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn canonicalize_nan( &mut self, sz: Size, input: Location<GPR, XMM>, output: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn emit_illegal_op(&mut self, trap: TrapCode) -> Result<(), CompileError>
fn emit_illegal_op(&mut self, trap: TrapCode) -> Result<(), CompileError>
source§fn emit_label(&mut self, label: DynamicLabel) -> Result<(), CompileError>
fn emit_label(&mut self, label: DynamicLabel) -> Result<(), CompileError>
source§fn get_grp_for_call(&self) -> GPR
fn get_grp_for_call(&self) -> GPR
source§fn emit_call_register(&mut self, reg: GPR) -> Result<(), CompileError>
fn emit_call_register(&mut self, reg: GPR) -> Result<(), CompileError>
source§fn emit_call_label(&mut self, label: DynamicLabel) -> Result<(), CompileError>
fn emit_call_label(&mut self, label: DynamicLabel) -> Result<(), CompileError>
source§fn get_gpr_for_ret(&self) -> GPR
fn get_gpr_for_ret(&self) -> GPR
source§fn get_simd_for_ret(&self) -> XMM
fn get_simd_for_ret(&self) -> XMM
source§fn arch_requires_indirect_call_trampoline(&self) -> bool
fn arch_requires_indirect_call_trampoline(&self) -> bool
source§fn arch_emit_indirect_call_with_trampoline(
&mut self,
location: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn arch_emit_indirect_call_with_trampoline( &mut self, location: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn emit_debug_breakpoint(&mut self) -> Result<(), CompileError>
fn emit_debug_breakpoint(&mut self) -> Result<(), CompileError>
source§fn emit_call_location(
&mut self,
location: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn emit_call_location( &mut self, location: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn location_address(
&mut self,
size: Size,
source: Location<GPR, XMM>,
dest: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn location_address( &mut self, size: Size, source: Location<GPR, XMM>, dest: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn location_and(
&mut self,
size: Size,
source: Location<GPR, XMM>,
dest: Location<GPR, XMM>,
_flags: bool,
) -> Result<(), CompileError>
fn location_and( &mut self, size: Size, source: Location<GPR, XMM>, dest: Location<GPR, XMM>, _flags: bool, ) -> Result<(), CompileError>
source§fn location_xor(
&mut self,
size: Size,
source: Location<GPR, XMM>,
dest: Location<GPR, XMM>,
_flags: bool,
) -> Result<(), CompileError>
fn location_xor( &mut self, size: Size, source: Location<GPR, XMM>, dest: Location<GPR, XMM>, _flags: bool, ) -> Result<(), CompileError>
source§fn location_or(
&mut self,
size: Size,
source: Location<GPR, XMM>,
dest: Location<GPR, XMM>,
_flags: bool,
) -> Result<(), CompileError>
fn location_or( &mut self, size: Size, source: Location<GPR, XMM>, dest: Location<GPR, XMM>, _flags: bool, ) -> Result<(), CompileError>
source§fn location_test(
&mut self,
size: Size,
source: Location<GPR, XMM>,
dest: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn location_test( &mut self, size: Size, source: Location<GPR, XMM>, dest: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn location_add(
&mut self,
size: Size,
source: Location<GPR, XMM>,
dest: Location<GPR, XMM>,
_flags: bool,
) -> Result<(), CompileError>
fn location_add( &mut self, size: Size, source: Location<GPR, XMM>, dest: Location<GPR, XMM>, _flags: bool, ) -> Result<(), CompileError>
source§fn location_sub(
&mut self,
size: Size,
source: Location<GPR, XMM>,
dest: Location<GPR, XMM>,
_flags: bool,
) -> Result<(), CompileError>
fn location_sub( &mut self, size: Size, source: Location<GPR, XMM>, dest: Location<GPR, XMM>, _flags: bool, ) -> Result<(), CompileError>
source§fn location_cmp(
&mut self,
size: Size,
source: Location<GPR, XMM>,
dest: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn location_cmp( &mut self, size: Size, source: Location<GPR, XMM>, dest: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn jmp_unconditionnal(
&mut self,
label: DynamicLabel,
) -> Result<(), CompileError>
fn jmp_unconditionnal( &mut self, label: DynamicLabel, ) -> Result<(), CompileError>
source§fn jmp_on_equal(&mut self, label: DynamicLabel) -> Result<(), CompileError>
fn jmp_on_equal(&mut self, label: DynamicLabel) -> Result<(), CompileError>
source§fn jmp_on_different(&mut self, label: DynamicLabel) -> Result<(), CompileError>
fn jmp_on_different(&mut self, label: DynamicLabel) -> Result<(), CompileError>
source§fn jmp_on_above(&mut self, label: DynamicLabel) -> Result<(), CompileError>
fn jmp_on_above(&mut self, label: DynamicLabel) -> Result<(), CompileError>
source§fn jmp_on_aboveequal(&mut self, label: DynamicLabel) -> Result<(), CompileError>
fn jmp_on_aboveequal(&mut self, label: DynamicLabel) -> Result<(), CompileError>
source§fn jmp_on_belowequal(&mut self, label: DynamicLabel) -> Result<(), CompileError>
fn jmp_on_belowequal(&mut self, label: DynamicLabel) -> Result<(), CompileError>
source§fn jmp_on_overflow(&mut self, label: DynamicLabel) -> Result<(), CompileError>
fn jmp_on_overflow(&mut self, label: DynamicLabel) -> Result<(), CompileError>
source§fn emit_jmp_to_jumptable(
&mut self,
label: DynamicLabel,
cond: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn emit_jmp_to_jumptable( &mut self, label: DynamicLabel, cond: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn align_for_loop(&mut self) -> Result<(), CompileError>
fn align_for_loop(&mut self) -> Result<(), CompileError>
source§fn emit_push(
&mut self,
size: Size,
loc: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn emit_push( &mut self, size: Size, loc: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn emit_pop(
&mut self,
size: Size,
loc: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn emit_pop( &mut self, size: Size, loc: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn emit_memory_fence(&mut self) -> Result<(), CompileError>
fn emit_memory_fence(&mut self) -> Result<(), CompileError>
source§fn location_neg(
&mut self,
size_val: Size,
signed: bool,
source: Location<GPR, XMM>,
size_op: Size,
dest: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn location_neg( &mut self, size_val: Size, signed: bool, source: Location<GPR, XMM>, size_op: Size, dest: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn emit_imul_imm32(
&mut self,
size: Size,
imm32: u32,
gpr: GPR,
) -> Result<(), CompileError>
fn emit_imul_imm32( &mut self, size: Size, imm32: u32, gpr: GPR, ) -> Result<(), CompileError>
source§fn emit_relaxed_mov(
&mut self,
sz: Size,
src: Location<GPR, XMM>,
dst: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn emit_relaxed_mov( &mut self, sz: Size, src: Location<GPR, XMM>, dst: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn emit_relaxed_cmp(
&mut self,
sz: Size,
src: Location<GPR, XMM>,
dst: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn emit_relaxed_cmp( &mut self, sz: Size, src: Location<GPR, XMM>, dst: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn emit_relaxed_zero_extension(
&mut self,
sz_src: Size,
src: Location<GPR, XMM>,
sz_dst: Size,
dst: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn emit_relaxed_zero_extension( &mut self, sz_src: Size, src: Location<GPR, XMM>, sz_dst: Size, dst: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn emit_relaxed_sign_extension(
&mut self,
sz_src: Size,
src: Location<GPR, XMM>,
sz_dst: Size,
dst: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn emit_relaxed_sign_extension( &mut self, sz_src: Size, src: Location<GPR, XMM>, sz_dst: Size, dst: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn emit_binop_add32(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn emit_binop_add32( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn emit_binop_sub32(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn emit_binop_sub32( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn emit_binop_mul32(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn emit_binop_mul32( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn emit_binop_udiv32(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
integer_division_by_zero: DynamicLabel,
_integer_overflow: DynamicLabel,
) -> Result<usize, CompileError>
fn emit_binop_udiv32( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, integer_division_by_zero: DynamicLabel, _integer_overflow: DynamicLabel, ) -> Result<usize, CompileError>
source§fn emit_binop_sdiv32(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
integer_division_by_zero: DynamicLabel,
_integer_overflow: DynamicLabel,
) -> Result<usize, CompileError>
fn emit_binop_sdiv32( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, integer_division_by_zero: DynamicLabel, _integer_overflow: DynamicLabel, ) -> Result<usize, CompileError>
source§fn emit_binop_urem32(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
integer_division_by_zero: DynamicLabel,
_integer_overflow: DynamicLabel,
) -> Result<usize, CompileError>
fn emit_binop_urem32( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, integer_division_by_zero: DynamicLabel, _integer_overflow: DynamicLabel, ) -> Result<usize, CompileError>
source§fn emit_binop_srem32(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
integer_division_by_zero: DynamicLabel,
_integer_overflow: DynamicLabel,
) -> Result<usize, CompileError>
fn emit_binop_srem32( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, integer_division_by_zero: DynamicLabel, _integer_overflow: DynamicLabel, ) -> Result<usize, CompileError>
source§fn emit_binop_and32(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn emit_binop_and32( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn emit_binop_or32(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn emit_binop_or32( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn emit_binop_xor32(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn emit_binop_xor32( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn i32_cmp_ge_s(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn i32_cmp_ge_s( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn i32_cmp_gt_s(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn i32_cmp_gt_s( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn i32_cmp_le_s(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn i32_cmp_le_s( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn i32_cmp_lt_s(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn i32_cmp_lt_s( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn i32_cmp_ge_u(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn i32_cmp_ge_u( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn i32_cmp_gt_u(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn i32_cmp_gt_u( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn i32_cmp_le_u(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn i32_cmp_le_u( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn i32_cmp_lt_u(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn i32_cmp_lt_u( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn i32_cmp_ne(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn i32_cmp_ne( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn i32_cmp_eq(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn i32_cmp_eq( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
fn i32_clz(
&mut self,
loc: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn i32_clz( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn i32_ctz(
&mut self,
loc: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn i32_ctz( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn i32_popcnt(
&mut self,
loc: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn i32_popcnt( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn i32_shl(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn i32_shl( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn i32_shr(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn i32_shr( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn i32_sar(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn i32_sar( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn i32_rol(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn i32_rol( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn i32_ror(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn i32_ror( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
fn i32_load(
&mut self,
addr: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i32_load( &mut self, addr: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i32_load_8u(
&mut self,
addr: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i32_load_8u( &mut self, addr: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i32_load_8s(
&mut self,
addr: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i32_load_8s( &mut self, addr: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i32_load_16u(
&mut self,
addr: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i32_load_16u( &mut self, addr: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i32_load_16s(
&mut self,
addr: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i32_load_16s( &mut self, addr: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i32_atomic_load(
&mut self,
addr: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i32_atomic_load( &mut self, addr: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i32_atomic_load_8u(
&mut self,
addr: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i32_atomic_load_8u( &mut self, addr: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i32_atomic_load_16u(
&mut self,
addr: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i32_atomic_load_16u( &mut self, addr: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
fn i32_save(
&mut self,
target_value: Location<GPR, XMM>,
memarg: &MemArg,
target_addr: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i32_save( &mut self, target_value: Location<GPR, XMM>, memarg: &MemArg, target_addr: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i32_save_8(
&mut self,
target_value: Location<GPR, XMM>,
memarg: &MemArg,
target_addr: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i32_save_8( &mut self, target_value: Location<GPR, XMM>, memarg: &MemArg, target_addr: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i32_save_16(
&mut self,
target_value: Location<GPR, XMM>,
memarg: &MemArg,
target_addr: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i32_save_16( &mut self, target_value: Location<GPR, XMM>, memarg: &MemArg, target_addr: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i32_atomic_save(
&mut self,
value: Location<GPR, XMM>,
memarg: &MemArg,
target_addr: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i32_atomic_save( &mut self, value: Location<GPR, XMM>, memarg: &MemArg, target_addr: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i32_atomic_save_8(
&mut self,
value: Location<GPR, XMM>,
memarg: &MemArg,
target_addr: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i32_atomic_save_8( &mut self, value: Location<GPR, XMM>, memarg: &MemArg, target_addr: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i32_atomic_save_16(
&mut self,
value: Location<GPR, XMM>,
memarg: &MemArg,
target_addr: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i32_atomic_save_16( &mut self, value: Location<GPR, XMM>, memarg: &MemArg, target_addr: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
fn i32_atomic_add(
&mut self,
loc: Location<GPR, XMM>,
target: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i32_atomic_add( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i32_atomic_add_8u(
&mut self,
loc: Location<GPR, XMM>,
target: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i32_atomic_add_8u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i32_atomic_add_16u(
&mut self,
loc: Location<GPR, XMM>,
target: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i32_atomic_add_16u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i32_atomic_sub(
&mut self,
loc: Location<GPR, XMM>,
target: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i32_atomic_sub( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i32_atomic_sub_8u(
&mut self,
loc: Location<GPR, XMM>,
target: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i32_atomic_sub_8u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i32_atomic_sub_16u(
&mut self,
loc: Location<GPR, XMM>,
target: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i32_atomic_sub_16u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i32_atomic_and(
&mut self,
loc: Location<GPR, XMM>,
target: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i32_atomic_and( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i32_atomic_and_8u(
&mut self,
loc: Location<GPR, XMM>,
target: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i32_atomic_and_8u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i32_atomic_and_16u(
&mut self,
loc: Location<GPR, XMM>,
target: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i32_atomic_and_16u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i32_atomic_or(
&mut self,
loc: Location<GPR, XMM>,
target: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i32_atomic_or( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i32_atomic_or_8u(
&mut self,
loc: Location<GPR, XMM>,
target: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i32_atomic_or_8u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i32_atomic_or_16u(
&mut self,
loc: Location<GPR, XMM>,
target: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i32_atomic_or_16u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i32_atomic_xor(
&mut self,
loc: Location<GPR, XMM>,
target: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i32_atomic_xor( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i32_atomic_xor_8u(
&mut self,
loc: Location<GPR, XMM>,
target: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i32_atomic_xor_8u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i32_atomic_xor_16u(
&mut self,
loc: Location<GPR, XMM>,
target: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i32_atomic_xor_16u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i32_atomic_xchg(
&mut self,
loc: Location<GPR, XMM>,
target: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i32_atomic_xchg( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i32_atomic_xchg_8u(
&mut self,
loc: Location<GPR, XMM>,
target: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i32_atomic_xchg_8u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i32_atomic_xchg_16u(
&mut self,
loc: Location<GPR, XMM>,
target: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i32_atomic_xchg_16u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
fn i32_atomic_cmpxchg(
&mut self,
new: Location<GPR, XMM>,
cmp: Location<GPR, XMM>,
target: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i32_atomic_cmpxchg( &mut self, new: Location<GPR, XMM>, cmp: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i32_atomic_cmpxchg_8u(
&mut self,
new: Location<GPR, XMM>,
cmp: Location<GPR, XMM>,
target: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i32_atomic_cmpxchg_8u( &mut self, new: Location<GPR, XMM>, cmp: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i32_atomic_cmpxchg_16u(
&mut self,
new: Location<GPR, XMM>,
cmp: Location<GPR, XMM>,
target: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i32_atomic_cmpxchg_16u( &mut self, new: Location<GPR, XMM>, cmp: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
fn emit_call_with_reloc(
&mut self,
_calling_convention: CallingConvention,
reloc_target: RelocationTarget,
) -> Result<Vec<Relocation>, CompileError>
fn emit_call_with_reloc( &mut self, _calling_convention: CallingConvention, reloc_target: RelocationTarget, ) -> Result<Vec<Relocation>, CompileError>
source§fn emit_binop_add64(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn emit_binop_add64( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn emit_binop_sub64(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn emit_binop_sub64( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn emit_binop_mul64(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn emit_binop_mul64( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
fn emit_binop_udiv64(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
integer_division_by_zero: DynamicLabel,
_integer_overflow: DynamicLabel,
) -> Result<usize, CompileError>
fn emit_binop_udiv64( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, integer_division_by_zero: DynamicLabel, _integer_overflow: DynamicLabel, ) -> Result<usize, CompileError>
source§fn emit_binop_sdiv64(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
integer_division_by_zero: DynamicLabel,
_integer_overflow: DynamicLabel,
) -> Result<usize, CompileError>
fn emit_binop_sdiv64( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, integer_division_by_zero: DynamicLabel, _integer_overflow: DynamicLabel, ) -> Result<usize, CompileError>
source§fn emit_binop_urem64(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
integer_division_by_zero: DynamicLabel,
_integer_overflow: DynamicLabel,
) -> Result<usize, CompileError>
fn emit_binop_urem64( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, integer_division_by_zero: DynamicLabel, _integer_overflow: DynamicLabel, ) -> Result<usize, CompileError>
source§fn emit_binop_srem64(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
integer_division_by_zero: DynamicLabel,
_integer_overflow: DynamicLabel,
) -> Result<usize, CompileError>
fn emit_binop_srem64( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, integer_division_by_zero: DynamicLabel, _integer_overflow: DynamicLabel, ) -> Result<usize, CompileError>
source§fn emit_binop_and64(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn emit_binop_and64( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn emit_binop_or64(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn emit_binop_or64( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn emit_binop_xor64(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn emit_binop_xor64( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
fn i64_cmp_ge_s(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn i64_cmp_ge_s( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn i64_cmp_gt_s(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn i64_cmp_gt_s( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn i64_cmp_le_s(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn i64_cmp_le_s( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn i64_cmp_lt_s(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn i64_cmp_lt_s( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn i64_cmp_ge_u(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn i64_cmp_ge_u( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn i64_cmp_gt_u(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn i64_cmp_gt_u( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn i64_cmp_le_u(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn i64_cmp_le_u( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn i64_cmp_lt_u(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn i64_cmp_lt_u( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn i64_cmp_ne(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn i64_cmp_ne( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn i64_cmp_eq(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn i64_cmp_eq( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn i64_clz(
&mut self,
loc: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn i64_clz( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn i64_ctz(
&mut self,
loc: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn i64_ctz( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn i64_popcnt(
&mut self,
loc: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn i64_popcnt( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn i64_shl(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn i64_shl( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn i64_shr(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn i64_shr( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn i64_sar(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn i64_sar( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn i64_rol(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn i64_rol( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn i64_ror(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn i64_ror( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
fn i64_load(
&mut self,
addr: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i64_load( &mut self, addr: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i64_load_8u(
&mut self,
addr: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i64_load_8u( &mut self, addr: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i64_load_8s(
&mut self,
addr: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i64_load_8s( &mut self, addr: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i64_load_16u(
&mut self,
addr: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i64_load_16u( &mut self, addr: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i64_load_16s(
&mut self,
addr: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i64_load_16s( &mut self, addr: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i64_load_32u(
&mut self,
addr: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i64_load_32u( &mut self, addr: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i64_load_32s(
&mut self,
addr: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i64_load_32s( &mut self, addr: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i64_atomic_load(
&mut self,
addr: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i64_atomic_load( &mut self, addr: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i64_atomic_load_8u(
&mut self,
addr: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i64_atomic_load_8u( &mut self, addr: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i64_atomic_load_16u(
&mut self,
addr: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i64_atomic_load_16u( &mut self, addr: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i64_atomic_load_32u(
&mut self,
addr: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i64_atomic_load_32u( &mut self, addr: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i64_save(
&mut self,
target_value: Location<GPR, XMM>,
memarg: &MemArg,
target_addr: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i64_save( &mut self, target_value: Location<GPR, XMM>, memarg: &MemArg, target_addr: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i64_save_8(
&mut self,
target_value: Location<GPR, XMM>,
memarg: &MemArg,
target_addr: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i64_save_8( &mut self, target_value: Location<GPR, XMM>, memarg: &MemArg, target_addr: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i64_save_16(
&mut self,
target_value: Location<GPR, XMM>,
memarg: &MemArg,
target_addr: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i64_save_16( &mut self, target_value: Location<GPR, XMM>, memarg: &MemArg, target_addr: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i64_save_32(
&mut self,
target_value: Location<GPR, XMM>,
memarg: &MemArg,
target_addr: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i64_save_32( &mut self, target_value: Location<GPR, XMM>, memarg: &MemArg, target_addr: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i64_atomic_save(
&mut self,
value: Location<GPR, XMM>,
memarg: &MemArg,
target_addr: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i64_atomic_save( &mut self, value: Location<GPR, XMM>, memarg: &MemArg, target_addr: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i64_atomic_save_8(
&mut self,
value: Location<GPR, XMM>,
memarg: &MemArg,
target_addr: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i64_atomic_save_8( &mut self, value: Location<GPR, XMM>, memarg: &MemArg, target_addr: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i64_atomic_save_16(
&mut self,
value: Location<GPR, XMM>,
memarg: &MemArg,
target_addr: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i64_atomic_save_16( &mut self, value: Location<GPR, XMM>, memarg: &MemArg, target_addr: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i64_atomic_save_32(
&mut self,
value: Location<GPR, XMM>,
memarg: &MemArg,
target_addr: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i64_atomic_save_32( &mut self, value: Location<GPR, XMM>, memarg: &MemArg, target_addr: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i64_atomic_add(
&mut self,
loc: Location<GPR, XMM>,
target: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i64_atomic_add( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i64_atomic_add_8u(
&mut self,
loc: Location<GPR, XMM>,
target: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i64_atomic_add_8u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i64_atomic_add_16u(
&mut self,
loc: Location<GPR, XMM>,
target: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i64_atomic_add_16u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i64_atomic_add_32u(
&mut self,
loc: Location<GPR, XMM>,
target: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i64_atomic_add_32u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i64_atomic_sub(
&mut self,
loc: Location<GPR, XMM>,
target: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i64_atomic_sub( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i64_atomic_sub_8u(
&mut self,
loc: Location<GPR, XMM>,
target: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i64_atomic_sub_8u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i64_atomic_sub_16u(
&mut self,
loc: Location<GPR, XMM>,
target: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i64_atomic_sub_16u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i64_atomic_sub_32u(
&mut self,
loc: Location<GPR, XMM>,
target: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i64_atomic_sub_32u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i64_atomic_and(
&mut self,
loc: Location<GPR, XMM>,
target: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i64_atomic_and( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i64_atomic_and_8u(
&mut self,
loc: Location<GPR, XMM>,
target: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i64_atomic_and_8u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i64_atomic_and_16u(
&mut self,
loc: Location<GPR, XMM>,
target: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i64_atomic_and_16u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i64_atomic_and_32u(
&mut self,
loc: Location<GPR, XMM>,
target: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i64_atomic_and_32u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i64_atomic_or(
&mut self,
loc: Location<GPR, XMM>,
target: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i64_atomic_or( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i64_atomic_or_8u(
&mut self,
loc: Location<GPR, XMM>,
target: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i64_atomic_or_8u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i64_atomic_or_16u(
&mut self,
loc: Location<GPR, XMM>,
target: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i64_atomic_or_16u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i64_atomic_or_32u(
&mut self,
loc: Location<GPR, XMM>,
target: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i64_atomic_or_32u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i64_atomic_xor(
&mut self,
loc: Location<GPR, XMM>,
target: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i64_atomic_xor( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i64_atomic_xor_8u(
&mut self,
loc: Location<GPR, XMM>,
target: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i64_atomic_xor_8u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i64_atomic_xor_16u(
&mut self,
loc: Location<GPR, XMM>,
target: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i64_atomic_xor_16u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i64_atomic_xor_32u(
&mut self,
loc: Location<GPR, XMM>,
target: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i64_atomic_xor_32u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i64_atomic_xchg(
&mut self,
loc: Location<GPR, XMM>,
target: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i64_atomic_xchg( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i64_atomic_xchg_8u(
&mut self,
loc: Location<GPR, XMM>,
target: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i64_atomic_xchg_8u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i64_atomic_xchg_16u(
&mut self,
loc: Location<GPR, XMM>,
target: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i64_atomic_xchg_16u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i64_atomic_xchg_32u(
&mut self,
loc: Location<GPR, XMM>,
target: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i64_atomic_xchg_32u( &mut self, loc: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i64_atomic_cmpxchg(
&mut self,
new: Location<GPR, XMM>,
cmp: Location<GPR, XMM>,
target: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i64_atomic_cmpxchg( &mut self, new: Location<GPR, XMM>, cmp: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i64_atomic_cmpxchg_8u(
&mut self,
new: Location<GPR, XMM>,
cmp: Location<GPR, XMM>,
target: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i64_atomic_cmpxchg_8u( &mut self, new: Location<GPR, XMM>, cmp: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i64_atomic_cmpxchg_16u(
&mut self,
new: Location<GPR, XMM>,
cmp: Location<GPR, XMM>,
target: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i64_atomic_cmpxchg_16u( &mut self, new: Location<GPR, XMM>, cmp: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn i64_atomic_cmpxchg_32u(
&mut self,
new: Location<GPR, XMM>,
cmp: Location<GPR, XMM>,
target: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn i64_atomic_cmpxchg_32u( &mut self, new: Location<GPR, XMM>, cmp: Location<GPR, XMM>, target: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn f32_load(
&mut self,
addr: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn f32_load( &mut self, addr: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn f32_save(
&mut self,
target_value: Location<GPR, XMM>,
memarg: &MemArg,
target_addr: Location<GPR, XMM>,
canonicalize: bool,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn f32_save( &mut self, target_value: Location<GPR, XMM>, memarg: &MemArg, target_addr: Location<GPR, XMM>, canonicalize: bool, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn f64_load(
&mut self,
addr: Location<GPR, XMM>,
memarg: &MemArg,
ret: Location<GPR, XMM>,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn f64_load( &mut self, addr: Location<GPR, XMM>, memarg: &MemArg, ret: Location<GPR, XMM>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn f64_save(
&mut self,
target_value: Location<GPR, XMM>,
memarg: &MemArg,
target_addr: Location<GPR, XMM>,
canonicalize: bool,
need_check: bool,
imported_memories: bool,
offset: i32,
heap_access_oob: DynamicLabel,
unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError>
fn f64_save( &mut self, target_value: Location<GPR, XMM>, memarg: &MemArg, target_addr: Location<GPR, XMM>, canonicalize: bool, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>
source§fn convert_f64_i64(
&mut self,
loc: Location<GPR, XMM>,
signed: bool,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn convert_f64_i64( &mut self, loc: Location<GPR, XMM>, signed: bool, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn convert_f64_i32(
&mut self,
loc: Location<GPR, XMM>,
signed: bool,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn convert_f64_i32( &mut self, loc: Location<GPR, XMM>, signed: bool, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn convert_f32_i64(
&mut self,
loc: Location<GPR, XMM>,
signed: bool,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn convert_f32_i64( &mut self, loc: Location<GPR, XMM>, signed: bool, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn convert_f32_i32(
&mut self,
loc: Location<GPR, XMM>,
signed: bool,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn convert_f32_i32( &mut self, loc: Location<GPR, XMM>, signed: bool, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn convert_i64_f64(
&mut self,
loc: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
signed: bool,
sat: bool,
) -> Result<(), CompileError>
fn convert_i64_f64( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, signed: bool, sat: bool, ) -> Result<(), CompileError>
source§fn convert_i32_f64(
&mut self,
loc: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
signed: bool,
sat: bool,
) -> Result<(), CompileError>
fn convert_i32_f64( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, signed: bool, sat: bool, ) -> Result<(), CompileError>
source§fn convert_i64_f32(
&mut self,
loc: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
signed: bool,
sat: bool,
) -> Result<(), CompileError>
fn convert_i64_f32( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, signed: bool, sat: bool, ) -> Result<(), CompileError>
source§fn convert_i32_f32(
&mut self,
loc: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
signed: bool,
sat: bool,
) -> Result<(), CompileError>
fn convert_i32_f32( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, signed: bool, sat: bool, ) -> Result<(), CompileError>
source§fn convert_f64_f32(
&mut self,
loc: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn convert_f64_f32( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn convert_f32_f64(
&mut self,
loc: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn convert_f32_f64( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn f64_neg(
&mut self,
loc: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn f64_neg( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn f64_abs(
&mut self,
loc: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn f64_abs( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn emit_i64_copysign(
&mut self,
tmp1: GPR,
tmp2: GPR,
) -> Result<(), CompileError>
fn emit_i64_copysign( &mut self, tmp1: GPR, tmp2: GPR, ) -> Result<(), CompileError>
source§fn f64_sqrt(
&mut self,
loc: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn f64_sqrt( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn f64_trunc(
&mut self,
loc: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn f64_trunc( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn f64_ceil(
&mut self,
loc: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn f64_ceil( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn f64_floor(
&mut self,
loc: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn f64_floor( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn f64_nearest(
&mut self,
loc: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn f64_nearest( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn f64_cmp_ge(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn f64_cmp_ge( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn f64_cmp_gt(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn f64_cmp_gt( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn f64_cmp_le(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn f64_cmp_le( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn f64_cmp_lt(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn f64_cmp_lt( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn f64_cmp_ne(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn f64_cmp_ne( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn f64_cmp_eq(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn f64_cmp_eq( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn f64_min(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn f64_min( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn f64_max(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn f64_max( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn f64_add(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn f64_add( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn f64_sub(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn f64_sub( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn f64_mul(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn f64_mul( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn f64_div(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn f64_div( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn f32_neg(
&mut self,
loc: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn f32_neg( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn f32_abs(
&mut self,
loc: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn f32_abs( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn emit_i32_copysign(
&mut self,
tmp1: GPR,
tmp2: GPR,
) -> Result<(), CompileError>
fn emit_i32_copysign( &mut self, tmp1: GPR, tmp2: GPR, ) -> Result<(), CompileError>
source§fn f32_sqrt(
&mut self,
loc: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn f32_sqrt( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn f32_trunc(
&mut self,
loc: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn f32_trunc( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn f32_ceil(
&mut self,
loc: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn f32_ceil( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn f32_floor(
&mut self,
loc: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn f32_floor( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn f32_nearest(
&mut self,
loc: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn f32_nearest( &mut self, loc: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn f32_cmp_ge(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn f32_cmp_ge( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn f32_cmp_gt(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn f32_cmp_gt( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn f32_cmp_le(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn f32_cmp_le( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn f32_cmp_lt(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn f32_cmp_lt( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn f32_cmp_ne(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn f32_cmp_ne( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn f32_cmp_eq(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn f32_cmp_eq( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn f32_min(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn f32_min( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn f32_max(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn f32_max( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn f32_add(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn f32_add( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn f32_sub(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn f32_sub( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn f32_mul(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn f32_mul( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn f32_div(
&mut self,
loc_a: Location<GPR, XMM>,
loc_b: Location<GPR, XMM>,
ret: Location<GPR, XMM>,
) -> Result<(), CompileError>
fn f32_div( &mut self, loc_a: Location<GPR, XMM>, loc_b: Location<GPR, XMM>, ret: Location<GPR, XMM>, ) -> Result<(), CompileError>
source§fn gen_std_trampoline(
&self,
sig: &FunctionType,
calling_convention: CallingConvention,
) -> Result<FunctionBody, CompileError>
fn gen_std_trampoline( &self, sig: &FunctionType, calling_convention: CallingConvention, ) -> Result<FunctionBody, CompileError>
source§fn gen_std_dynamic_import_trampoline(
&self,
vmoffsets: &VMOffsets,
sig: &FunctionType,
calling_convention: CallingConvention,
) -> Result<FunctionBody, CompileError>
fn gen_std_dynamic_import_trampoline( &self, vmoffsets: &VMOffsets, sig: &FunctionType, calling_convention: CallingConvention, ) -> Result<FunctionBody, CompileError>
source§fn gen_import_call_trampoline(
&self,
vmoffsets: &VMOffsets,
index: FunctionIndex,
sig: &FunctionType,
calling_convention: CallingConvention,
) -> Result<CustomSection, CompileError>
fn gen_import_call_trampoline( &self, vmoffsets: &VMOffsets, index: FunctionIndex, sig: &FunctionType, calling_convention: CallingConvention, ) -> Result<CustomSection, CompileError>
source§fn gen_dwarf_unwind_info(
&mut self,
code_len: usize,
) -> Option<UnwindInstructions>
fn gen_dwarf_unwind_info( &mut self, code_len: usize, ) -> Option<UnwindInstructions>
Auto Trait Implementations§
impl Freeze for MachineX86_64
impl RefUnwindSafe for MachineX86_64
impl Send for MachineX86_64
impl Sync for MachineX86_64
impl Unpin for MachineX86_64
impl UnwindSafe for MachineX86_64
Blanket Implementations§
§impl<T> ArchivePointee for T
impl<T> ArchivePointee for T
§type ArchivedMetadata = ()
type ArchivedMetadata = ()
§fn pointer_metadata(
_: &<T as ArchivePointee>::ArchivedMetadata,
) -> <T as Pointee>::Metadata
fn pointer_metadata( _: &<T as ArchivePointee>::ArchivedMetadata, ) -> <T as Pointee>::Metadata
source§impl<T> BorrowMut<T> for T where
T: ?Sized,
impl<T> BorrowMut<T> for T where
T: ?Sized,
source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
source§impl<T> IntoEither for T
impl<T> IntoEither for T
source§fn into_either(self, into_left: bool) -> Either<Self, Self>
fn into_either(self, into_left: bool) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self> if into_left is true.
Converts self into a Right variant of Either<Self, Self> otherwise. Read more
source§fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true.
Converts self into a Right variant of Either<Self, Self> otherwise. Read more