Trait wasmer_compiler_singlepass::machine::Machine

pub trait Machine {
    type GPR: Copy + Eq + Debug + Reg;
    type SIMD: Copy + Eq + Debug + Reg;

    // 297 required methods; they are documented individually under “Required Methods” below.
}

Required Associated Types

type GPR: Copy + Eq + Debug + Reg

type SIMD: Copy + Eq + Debug + Reg

Required Methods

fn assembler_get_offset(&self) -> AssemblyOffset

Get current assembler offset

fn index_from_gpr(&self, x: Self::GPR) -> RegisterIndex

Convert a GPR register to a register index.

fn index_from_simd(&self, x: Self::SIMD) -> RegisterIndex

Convert a SIMD register to a register index.

fn get_vmctx_reg(&self) -> Self::GPR

Get the GPR that holds the vmctx pointer.
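
Illustration (not part of the trait documentation): the vmctx register is typically used as the base of a Location::Memory operand when reading a field out of the VM context. A minimal sketch, generic over any Machine implementor and assuming the crate-internal Size, Location and CompileError types are in scope; the 16-byte field offset is a made-up placeholder:

fn load_vmctx_field<M: Machine>(m: &mut M) -> Result<(), CompileError> {
    // Base register holding the VMContext pointer.
    let vmctx = m.get_vmctx_reg();
    // Destination: any currently unused general-purpose register (assumed to exist here).
    let dst = m.pick_gpr().expect("no free GPR");
    // Load 8 bytes from [vmctx + 16] into dst; 16 is a placeholder offset.
    m.move_location(Size::S64, Location::Memory(vmctx, 16), Location::GPR(dst))
}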

fn pick_gpr(&self) -> Option<Self::GPR>

Picks an unused general purpose register for local/stack/argument use.

This method does not mark the register as used

fn pick_temp_gpr(&self) -> Option<Self::GPR>

Picks an unused general purpose register for internal temporary use.

This method does not mark the register as used

fn get_used_gprs(&self) -> Vec<Self::GPR>

Get all used GPRs.

fn get_used_simd(&self) -> Vec<Self::SIMD>

Get all used SIMD registers.

fn acquire_temp_gpr(&mut self) -> Option<Self::GPR>

Picks an unused general purpose register and marks it as used.

fn release_gpr(&mut self, gpr: Self::GPR)

Releases a temporary GPR.
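
A minimal usage sketch (assuming the crate-internal Size, Location and CompileError types are in scope): pick_gpr and pick_temp_gpr only propose a register, while acquire_temp_gpr also marks it as used, so it is paired with release_gpr:

fn with_temp_gpr<M: Machine>(m: &mut M) -> Result<(), CompileError> {
    // Reserve a scratch register for the duration of this snippet.
    let tmp = m.acquire_temp_gpr().expect("no temporary GPR available");
    // Use it, e.g. materialize an immediate into it.
    m.move_location(Size::S32, Location::Imm32(42), Location::GPR(tmp))?;
    // Hand it back so later picks can reuse it.
    m.release_gpr(tmp);
    Ok(())
}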

fn reserve_unused_temp_gpr(&mut self, gpr: Self::GPR) -> Self::GPR

Specify that a given register is in use.

fn reserve_gpr(&mut self, gpr: Self::GPR)

Reserve a GPR.

fn push_used_gpr(&mut self, grps: &[Self::GPR]) -> Result<usize, CompileError>

Push the used GPRs to the stack. Returns the number of bytes taken on the stack.

fn pop_used_gpr(&mut self, grps: &[Self::GPR]) -> Result<(), CompileError>

Pop the used GPRs from the stack.
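
For example, the currently used GPRs can be spilled around a call and restored afterwards; the byte count returned by push_used_gpr can then feed a larger stack-adjustment computation. A hedged sketch, not the actual code generator:

fn call_preserving_gprs<M: Machine>(m: &mut M, callee: M::GPR) -> Result<(), CompileError> {
    let used = m.get_used_gprs();
    // Save every live GPR; `saved_bytes` is how much stack the saves consumed.
    let saved_bytes = m.push_used_gpr(&used)?;
    m.emit_call_register(callee)?;
    // Restore them again after the call.
    m.pop_used_gpr(&used)?;
    let _ = saved_bytes;
    Ok(())
}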

fn pick_simd(&self) -> Option<Self::SIMD>

Picks an unused SIMD register.

This method does not mark the register as used

fn pick_temp_simd(&self) -> Option<Self::SIMD>

Picks an unused SIMD register for internal temporary use.

This method does not mark the register as used

fn acquire_temp_simd(&mut self) -> Option<Self::SIMD>

Acquires a temporary SIMD register (XMM on x86-64).

fn reserve_simd(&mut self, simd: Self::SIMD)

Reserve a SIMD register.

fn release_simd(&mut self, simd: Self::SIMD)

Releases a temporary SIMD register (XMM on x86-64).

fn push_used_simd( &mut self, simds: &[Self::SIMD], ) -> Result<usize, CompileError>

Push the used SIMD registers to the stack. Returns the number of bytes taken on the stack.

fn pop_used_simd(&mut self, simds: &[Self::SIMD]) -> Result<(), CompileError>

Pop the used SIMD registers from the stack.

fn round_stack_adjust(&self, value: usize) -> usize

Return a rounded stack-adjustment value (for example, it must be a multiple of 16 bytes on ARM64).

fn set_srcloc(&mut self, offset: u32)

Set the source location of the Wasm to the given offset.

fn mark_address_range_with_trap_code( &mut self, code: TrapCode, begin: usize, end: usize, )

Marks each address in the code range from begin to end with the given trap code.

fn mark_address_with_trap_code(&mut self, code: TrapCode)

Marks one address as trappable with the given trap code.

source

fn mark_instruction_with_trap_code(&mut self, code: TrapCode) -> usize

Marks the instruction as trappable with the given trap code. Returns the “begin” offset.
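
A hedged sketch of the begin/end pairing, using a debug breakpoint as a stand-in for whatever trappable instruction a real lowering would emit:

fn emit_trappable_breakpoint<M: Machine>(m: &mut M, code: TrapCode) -> Result<(), CompileError> {
    let begin = m.mark_instruction_with_trap_code(code);
    m.emit_debug_breakpoint()?; // any single trappable instruction would do here
    m.mark_instruction_address_end(begin);
    Ok(())
}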

source

fn mark_instruction_address_end(&mut self, begin: usize)

Pushes the instruction to the address map, calculating the offset from a provided beginning address.

source

fn insert_stackoverflow(&mut self)

Insert a StackOverflow trap (at offset 0)

source

fn collect_trap_information(&self) -> Vec<TrapInformation>

Get all current TrapInformation

source

fn instructions_address_map(&self) -> Vec<InstructionAddressMap>

source

fn local_on_stack( &mut self, stack_offset: i32, ) -> Location<Self::GPR, Self::SIMD>

Memory location for a local on the stack. For example, Location::Memory(GPR::RBP, -(self.stack_offset.0 as i32)) on x86_64

source

fn adjust_stack(&mut self, delta_stack_offset: u32) -> Result<(), CompileError>

Adjust the stack for locals. For example, assembler.emit_sub(Size::S64, Location::Imm32(delta_stack_offset as u32), Location::GPR(GPR::RSP)) on x86_64

source

fn restore_stack(&mut self, delta_stack_offset: u32) -> Result<(), CompileError>

Restore the stack. For example, assembler.emit_add(Size::S64, Location::Imm32(delta_stack_offset as u32), Location::GPR(GPR::RSP)) on x86_64

source

fn pop_stack_locals( &mut self, delta_stack_offset: u32, ) -> Result<(), CompileError>

Pop the stack of locals. For example, assembler.emit_add(Size::S64, Location::Imm32(delta_stack_offset as u32), Location::GPR(GPR::RSP)) on x86_64

source

fn zero_location( &mut self, size: Size, location: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Zero a location that is 32 bits

source

fn local_pointer(&self) -> Self::GPR

GPR register used as the local pointer on the stack

source

fn move_location_for_native( &mut self, size: Size, loc: Location<Self::GPR, Self::SIMD>, dest: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

push a value on the stack for a native call

source

fn is_local_on_stack(&self, idx: usize) -> bool

Determine whether a local should be allocated on the stack.

source

fn get_local_location( &self, idx: usize, callee_saved_regs_size: usize, ) -> Location<Self::GPR, Self::SIMD>

Determine a local’s location.

source

fn move_local( &mut self, stack_offset: i32, location: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Move a local to the stack. For example, emit_mov(Size::S64, location, Location::Memory(GPR::RBP, -(self.stack_offset.0 as i32))) on x86_64
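
A hedged sketch: copy an incoming parameter into its local slot, either on the stack or in the register that get_local_location designates. The local index and offsets are illustrative; a real emitter derives them from the signature and its own stack bookkeeping.

fn spill_param_to_local<M: Machine>(
    m: &mut M,
    stack_offset: i32,
    param: Location<M::GPR, M::SIMD>,
) -> Result<(), CompileError> {
    if m.is_local_on_stack(0) {
        m.move_local(stack_offset, param) // e.g. a store relative to RBP on x86_64
    } else {
        let dest = m.get_local_location(0, 0);
        m.move_location(Size::S64, param, dest)
    }
}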

source

fn list_to_save( &self, calling_convention: CallingConvention, ) -> Vec<Location<Self::GPR, Self::SIMD>>

List of registers to save, depending on the CallingConvention

source

fn get_param_location( &self, idx: usize, sz: Size, stack_offset: &mut usize, calling_convention: CallingConvention, ) -> Location<Self::GPR, Self::SIMD>

Get param location (to build a call, using SP for stack args)

source

fn get_call_param_location( &self, idx: usize, sz: Size, stack_offset: &mut usize, calling_convention: CallingConvention, ) -> Location<Self::GPR, Self::SIMD>

Get call param location (from a call, using FP for stack args)

source

fn get_simple_param_location( &self, idx: usize, calling_convention: CallingConvention, ) -> Location<Self::GPR, Self::SIMD>

Get simple param location

source

fn move_location( &mut self, size: Size, source: Location<Self::GPR, Self::SIMD>, dest: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

move a location to another

source

fn move_location_extend( &mut self, size_val: Size, signed: bool, source: Location<Self::GPR, Self::SIMD>, size_op: Size, dest: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

move a location to another, with zero or sign extension
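
A small sketch of the common case: widening a 32-bit value into a 64-bit destination, with the caller choosing zero or sign extension.

fn widen_i32_to_i64<M: Machine>(
    m: &mut M,
    signed: bool,
    src: Location<M::GPR, M::SIMD>,
    dst: Location<M::GPR, M::SIMD>,
) -> Result<(), CompileError> {
    // size_val = source value size, size_op = destination/operation size.
    m.move_location_extend(Size::S32, signed, src, Size::S64, dst)
}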

source

fn load_address( &mut self, size: Size, gpr: Location<Self::GPR, Self::SIMD>, mem: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Load a memory value into a register, zero-extending to 64 bits. Panics if gpr is not a Location::GPR or if mem is not a Memory(2) location

source

fn init_stack_loc( &mut self, init_stack_loc_cnt: u64, last_stack_loc: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Initialize init_stack_loc_cnt stack locations, the last one being last_stack_loc

source

fn restore_saved_area( &mut self, saved_area_offset: i32, ) -> Result<(), CompileError>

Restore save_area

source

fn pop_location( &mut self, location: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Pop a location

source

fn new_machine_state(&self) -> MachineState

Create a new MachineState with default values.

source

fn assembler_finalize(self) -> Result<Vec<u8>, CompileError>

Finalize the assembler

source

fn get_offset(&self) -> AssemblyOffset

Get the current offset of the assembler

source

fn finalize_function(&mut self) -> Result<(), CompileError>

finalize a function

source

fn emit_function_prolog(&mut self) -> Result<(), CompileError>

Emit the native function prolog (depending on the calling convention, like “PUSH RBP / MOV RSP, RBP” on x86_64)

source

fn emit_function_epilog(&mut self) -> Result<(), CompileError>

Emit the native function epilog (depending on the calling convention, like “MOV RBP, RSP / POP RBP” on x86_64)
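
A hedged sketch of the overall framing of one compiled function, reduced to the prolog/epilog, return and finalization calls documented on this page:

fn emit_empty_function<M: Machine>(m: &mut M) -> Result<(), CompileError> {
    m.emit_function_prolog()?; // e.g. PUSH RBP / MOV RSP, RBP
    // ... function body would be emitted here ...
    m.emit_function_epilog()?; // e.g. MOV RBP, RSP / POP RBP
    m.emit_ret()?;
    m.finalize_function()
}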

source

fn emit_function_return_value( &mut self, ty: WpType, cannonicalize: bool, loc: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Handle the return value, with optional canonicalization if wanted

source

fn emit_function_return_float(&mut self) -> Result<(), CompileError>

Handle copy to SIMD register from ret value (if needed by the arch/calling convention)

source

fn arch_supports_canonicalize_nan(&self) -> bool

Is NaN canonicalization supported

source

fn canonicalize_nan( &mut self, sz: Size, input: Location<Self::GPR, Self::SIMD>, output: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Canonicalize a NaN (or panic if not supported)
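
A minimal sketch of the guarded use this implies: canonicalize only when the architecture supports it, otherwise fall back to a plain move.

fn maybe_canonicalize<M: Machine>(
    m: &mut M,
    sz: Size,
    input: Location<M::GPR, M::SIMD>,
    output: Location<M::GPR, M::SIMD>,
) -> Result<(), CompileError> {
    if m.arch_supports_canonicalize_nan() {
        m.canonicalize_nan(sz, input, output)
    } else {
        m.move_location(sz, input, output)
    }
}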

source

fn emit_illegal_op(&mut self, trp: TrapCode) -> Result<(), CompileError>

Emit an illegal opcode, associated with a trap code

source

fn get_label(&mut self) -> DynamicLabel

create a new label

source

fn emit_label(&mut self, label: DynamicLabel) -> Result<(), CompileError>

emit a label

source

fn get_grp_for_call(&self) -> Self::GPR

Get the GPR used for calls, like RAX on x86_64

source

fn emit_call_register( &mut self, register: Self::GPR, ) -> Result<(), CompileError>

Emit a call using the value in register

source

fn emit_call_label(&mut self, label: DynamicLabel) -> Result<(), CompileError>

Emit a call to a label
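
A hedged sketch contrasting the two call forms on this page: a direct call to a label emitted in the same assembler, and an indirect call through the designated call register.

fn emit_two_calls<M: Machine>(m: &mut M, helper: DynamicLabel) -> Result<(), CompileError> {
    // Direct call to code emitted in this same assembler.
    m.emit_call_label(helper)?;
    // Indirect call through the call register (e.g. RAX on x86_64);
    // the target address is assumed to have been moved into it already.
    let call_reg = m.get_grp_for_call();
    m.emit_call_register(call_reg)
}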

source

fn arch_requires_indirect_call_trampoline(&self) -> bool

Whether a trampoline is needed for indirect calls

source

fn arch_emit_indirect_call_with_trampoline( &mut self, location: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

indirect call with trampoline

source

fn emit_call_location( &mut self, location: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

emit a call to a location

source

fn get_gpr_for_ret(&self) -> Self::GPR

Get the GPR used for returning generic values

source

fn get_simd_for_ret(&self) -> Self::SIMD

Get the SIMD register used for returning float/double values

source

fn emit_debug_breakpoint(&mut self) -> Result<(), CompileError>

Emit a debug breakpoint

source

fn location_address( &mut self, size: Size, source: Location<Self::GPR, Self::SIMD>, dest: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Load the address of a memory location (panics if src is not a memory location), like the LEA opcode on x86_64

source

fn location_and( &mut self, size: Size, source: Location<Self::GPR, Self::SIMD>, dest: Location<Self::GPR, Self::SIMD>, flags: bool, ) -> Result<(), CompileError>

And src & dst -> dst (with or without flags)

source

fn location_xor( &mut self, size: Size, source: Location<Self::GPR, Self::SIMD>, dest: Location<Self::GPR, Self::SIMD>, flags: bool, ) -> Result<(), CompileError>

Xor src ^ dst -> dst (with or without flags)

source

fn location_or( &mut self, size: Size, source: Location<Self::GPR, Self::SIMD>, dest: Location<Self::GPR, Self::SIMD>, flags: bool, ) -> Result<(), CompileError>

Or src | dst -> dst (with or without flags)

source

fn location_add( &mut self, size: Size, source: Location<Self::GPR, Self::SIMD>, dest: Location<Self::GPR, Self::SIMD>, flags: bool, ) -> Result<(), CompileError>

Add src+dst -> dst (with or without flags)

source

fn location_sub( &mut self, size: Size, source: Location<Self::GPR, Self::SIMD>, dest: Location<Self::GPR, Self::SIMD>, flags: bool, ) -> Result<(), CompileError>

Sub dst-src -> dst (with or without flags)

source

fn location_neg( &mut self, size_val: Size, signed: bool, source: Location<Self::GPR, Self::SIMD>, size_op: Size, dest: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

-src -> dst

source

fn location_cmp( &mut self, size: Size, source: Location<Self::GPR, Self::SIMD>, dest: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Cmp src - dst and set flags

source

fn location_test( &mut self, size: Size, source: Location<Self::GPR, Self::SIMD>, dest: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Test src & dst and set flags

source

fn jmp_unconditionnal( &mut self, label: DynamicLabel, ) -> Result<(), CompileError>

jmp without condition

source

fn jmp_on_equal(&mut self, label: DynamicLabel) -> Result<(), CompileError>

jmp on equal (src==dst), like the Equal flag set on x86_64

source

fn jmp_on_different(&mut self, label: DynamicLabel) -> Result<(), CompileError>

jmp on different (src!=dst), like the NotEqual flag set on x86_64

source

fn jmp_on_above(&mut self, label: DynamicLabel) -> Result<(), CompileError>

jmp on above (src>dst), like the Above flag set on x86_64

source

fn jmp_on_aboveequal(&mut self, label: DynamicLabel) -> Result<(), CompileError>

jmp on above or equal (src>=dst), like the Above or Equal flag set on x86_64

source

fn jmp_on_belowequal(&mut self, label: DynamicLabel) -> Result<(), CompileError>

jmp on below or equal (src<=dst), like the Below or Equal flag set on x86_64

source

fn jmp_on_overflow(&mut self, label: DynamicLabel) -> Result<(), CompileError>

jmp on overflow, like the Carry flag set on x86_64
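
A hedged sketch of a compare-and-branch diamond built from the label and jump helpers above; the two code paths are left as comments.

fn branch_if_equal<M: Machine>(
    m: &mut M,
    a: Location<M::GPR, M::SIMD>,
    b: Location<M::GPR, M::SIMD>,
) -> Result<(), CompileError> {
    let equal = m.get_label();
    let done = m.get_label();
    m.emit_relaxed_cmp(Size::S32, a, b)?; // compare and set flags
    m.jmp_on_equal(equal)?;
    // ... "not equal" path ...
    m.jmp_unconditionnal(done)?;
    m.emit_label(equal)?;
    // ... "equal" path ...
    m.emit_label(done)?;
    Ok(())
}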

source

fn emit_jmp_to_jumptable( &mut self, label: DynamicLabel, cond: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

jmp using a jump table at label, with cond as the index

source

fn align_for_loop(&mut self) -> Result<(), CompileError>

Align for Loop (may do nothing, depending on the arch)

source

fn emit_ret(&mut self) -> Result<(), CompileError>

ret (from a Call)

source

fn emit_push( &mut self, size: Size, loc: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Stack push of a location

source

fn emit_pop( &mut self, size: Size, loc: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Stack pop of a location

source

fn emit_relaxed_mov( &mut self, sz: Size, src: Location<Self::GPR, Self::SIMD>, dst: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

relaxed mov: move from anywhere to anywhere

source

fn emit_relaxed_cmp( &mut self, sz: Size, src: Location<Self::GPR, Self::SIMD>, dst: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

relaxed cmp: compare between any two locations

source

fn emit_memory_fence(&mut self) -> Result<(), CompileError>

Emit a memory fence. Can be nothing for x86_64 or a DMB on ARM64 for example

source

fn emit_relaxed_zero_extension( &mut self, sz_src: Size, src: Location<Self::GPR, Self::SIMD>, sz_dst: Size, dst: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

relaxed move with zero extension

source

fn emit_relaxed_sign_extension( &mut self, sz_src: Size, src: Location<Self::GPR, Self::SIMD>, sz_dst: Size, dst: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

relaxed move with sign extension

source

fn emit_imul_imm32( &mut self, size: Size, imm32: u32, gpr: Self::GPR, ) -> Result<(), CompileError>

Multiply location with immediate

source

fn emit_binop_add32( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Add with location directly from the stack

source

fn emit_binop_sub32( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Sub with location directly from the stack

source

fn emit_binop_mul32( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Multiply with location directly from the stack

source

fn emit_binop_udiv32( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, integer_division_by_zero: DynamicLabel, integer_overflow: DynamicLabel, ) -> Result<usize, CompileError>

Unsigned Division with locations directly from the stack. Returns the offset of the DIV opcode, to mark as trappable.
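
A hedged sketch of lowering an i32 unsigned division: the trap labels are owned by the surrounding code generator, and the returned offset is used to close the trappable range.

fn lower_i32_div_u<M: Machine>(
    m: &mut M,
    loc_a: Location<M::GPR, M::SIMD>,
    loc_b: Location<M::GPR, M::SIMD>,
    ret: Location<M::GPR, M::SIMD>,
    div_by_zero: DynamicLabel,
    overflow: DynamicLabel,
) -> Result<(), CompileError> {
    let begin = m.emit_binop_udiv32(loc_a, loc_b, ret, div_by_zero, overflow)?;
    m.mark_instruction_address_end(begin); // `begin` is the offset of the DIV opcode
    Ok(())
}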

source

fn emit_binop_sdiv32( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, integer_division_by_zero: DynamicLabel, integer_overflow: DynamicLabel, ) -> Result<usize, CompileError>

Signed Division with locations directly from the stack. Returns the offset of the DIV opcode, to mark as trappable.

source

fn emit_binop_urem32( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, integer_division_by_zero: DynamicLabel, integer_overflow: DynamicLabel, ) -> Result<usize, CompileError>

Unsigned Remainder (of a division) with locations directly from the stack. Returns the offset of the DIV opcode, to mark as trappable.

source

fn emit_binop_srem32( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, integer_division_by_zero: DynamicLabel, integer_overflow: DynamicLabel, ) -> Result<usize, CompileError>

Signed Remainder (of a division) with locations directly from the stack. Returns the offset of the DIV opcode, to mark as trappable.

source

fn emit_binop_and32( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

And with location directly from the stack

source

fn emit_binop_or32( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Or with location directly from the stack

source

fn emit_binop_xor32( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Xor with location directly from the stack

source

fn i32_cmp_ge_s( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Signed Greater or Equal Compare 2 i32, result in a GPR

source

fn i32_cmp_gt_s( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Signed Greater Than Compare 2 i32, result in a GPR

source

fn i32_cmp_le_s( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Signed Less or Equal Compare 2 i32, result in a GPR

source

fn i32_cmp_lt_s( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Signed Less Than Compare 2 i32, result in a GPR

source

fn i32_cmp_ge_u( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Unsigned Greater or Equal Compare 2 i32, result in a GPR

source

fn i32_cmp_gt_u( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Unsigned Greater Than Compare 2 i32, result in a GPR

source

fn i32_cmp_le_u( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Unsigned Less or Equal Compare 2 i32, result in a GPR

source

fn i32_cmp_lt_u( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Unsigned Less Than Compare 2 i32, result in a GPR

source

fn i32_cmp_ne( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Not Equal Compare 2 i32, result in a GPR

source

fn i32_cmp_eq( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Equal Compare 2 i32, result in a GPR

source

fn i32_clz( &mut self, loc: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Count the leading 0 bits of an i32

source

fn i32_ctz( &mut self, loc: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Count the trailing 0 bits of an i32

source

fn i32_popcnt( &mut self, loc: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Count the number of 1 bits of an i32

source

fn i32_shl( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

i32 Logical Shift Left

source

fn i32_shr( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

i32 Logical Shift Right

source

fn i32_sar( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

i32 Arithmetic Shift Right

source

fn i32_rol( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

i32 Rotate Left

source

fn i32_ror( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

i32 Rotate Right

source

fn i32_load( &mut self, addr: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 load
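
A hedged sketch of wiring this call: the bounds-check flags and the shared trap labels are owned by the surrounding code generator, and the vmctx offset of the memory is illustrative (a real emitter computes it from VMOffsets).

fn lower_i32_load<M: Machine>(
    m: &mut M,
    addr: Location<M::GPR, M::SIMD>,
    memarg: &MemArg,
    ret: Location<M::GPR, M::SIMD>,
    heap_access_oob: DynamicLabel,
    unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError> {
    let need_check = true;         // emit an explicit bounds check
    let imported_memories = false; // memory 0 is local to this instance
    let memory_offset = 0;         // offset of the memory definition in vmctx (assumed)
    m.i32_load(
        addr, memarg, ret,
        need_check, imported_memories, memory_offset,
        heap_access_oob, unaligned_atomic,
    )
}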

source

fn i32_load_8u( &mut self, addr: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 load of an unsigned 8bits

source

fn i32_load_8s( &mut self, addr: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 load of a signed 8-bit value

source

fn i32_load_16u( &mut self, addr: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 load of an unsigned 16bits

source

fn i32_load_16s( &mut self, addr: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 load of a signed 16-bit value

source

fn i32_atomic_load( &mut self, addr: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 atomic load

source

fn i32_atomic_load_8u( &mut self, addr: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 atomic load of an unsigned 8bits

source

fn i32_atomic_load_16u( &mut self, addr: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 atomic load of an unsigned 16bits

source

fn i32_save( &mut self, value: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, addr: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 save

source

fn i32_save_8( &mut self, value: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, addr: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 save of the lower 8bits

source

fn i32_save_16( &mut self, value: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, addr: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 save of the lower 16bits

source

fn i32_atomic_save( &mut self, value: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, addr: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 atomic save

source

fn i32_atomic_save_8( &mut self, value: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, addr: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 atomic save of the lower 8 bits

source

fn i32_atomic_save_16( &mut self, value: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, addr: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 atomic save of the lower 16 bits

source

fn i32_atomic_add( &mut self, loc: Location<Self::GPR, Self::SIMD>, target: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 atomic Add with i32

source

fn i32_atomic_add_8u( &mut self, loc: Location<Self::GPR, Self::SIMD>, target: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 atomic Add with unsigned 8bits

source

fn i32_atomic_add_16u( &mut self, loc: Location<Self::GPR, Self::SIMD>, target: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 atomic Add with unsigned 16bits

source

fn i32_atomic_sub( &mut self, loc: Location<Self::GPR, Self::SIMD>, target: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 atomic Sub with i32

source

fn i32_atomic_sub_8u( &mut self, loc: Location<Self::GPR, Self::SIMD>, target: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 atomic Sub with unsigned 8bits

source

fn i32_atomic_sub_16u( &mut self, loc: Location<Self::GPR, Self::SIMD>, target: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 atomic Sub with unsigned 16bits

source

fn i32_atomic_and( &mut self, loc: Location<Self::GPR, Self::SIMD>, target: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 atomic And with i32

source

fn i32_atomic_and_8u( &mut self, loc: Location<Self::GPR, Self::SIMD>, target: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 atomic And with unsigned 8bits

source

fn i32_atomic_and_16u( &mut self, loc: Location<Self::GPR, Self::SIMD>, target: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 atomic And with unsigned 16bits

source

fn i32_atomic_or( &mut self, loc: Location<Self::GPR, Self::SIMD>, target: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 atomic Or with i32

source

fn i32_atomic_or_8u( &mut self, loc: Location<Self::GPR, Self::SIMD>, target: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 atomic Or with unsigned 8bits

source

fn i32_atomic_or_16u( &mut self, loc: Location<Self::GPR, Self::SIMD>, target: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 atomic Or with unsigned 16bits

source

fn i32_atomic_xor( &mut self, loc: Location<Self::GPR, Self::SIMD>, target: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 atomic Xor with i32

source

fn i32_atomic_xor_8u( &mut self, loc: Location<Self::GPR, Self::SIMD>, target: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 atomic Xor with unsigned 8bits

source

fn i32_atomic_xor_16u( &mut self, loc: Location<Self::GPR, Self::SIMD>, target: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 atomic Xor with unsigned 16bits

source

fn i32_atomic_xchg( &mut self, loc: Location<Self::GPR, Self::SIMD>, target: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 atomic Exchange with i32

source

fn i32_atomic_xchg_8u( &mut self, loc: Location<Self::GPR, Self::SIMD>, target: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 atomic Exchange with u8

source

fn i32_atomic_xchg_16u( &mut self, loc: Location<Self::GPR, Self::SIMD>, target: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 atomic Exchange with u16

source

fn i32_atomic_cmpxchg( &mut self, new: Location<Self::GPR, Self::SIMD>, cmp: Location<Self::GPR, Self::SIMD>, target: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 atomic Compare and Exchange with i32
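
A hedged sketch of an i32 atomic compare-and-exchange against linear memory; all locations and labels come from the surrounding code generator, and the vmctx offset is illustrative.

fn lower_i32_cmpxchg<M: Machine>(
    m: &mut M,
    new: Location<M::GPR, M::SIMD>,
    cmp: Location<M::GPR, M::SIMD>,
    target: Location<M::GPR, M::SIMD>,
    memarg: &MemArg,
    ret: Location<M::GPR, M::SIMD>,
    heap_access_oob: DynamicLabel,
    unaligned_atomic: DynamicLabel,
) -> Result<(), CompileError> {
    m.i32_atomic_cmpxchg(
        new, cmp, target, memarg, ret,
        true,  // need_check: emit a bounds check
        false, // imported_memories
        0,     // offset of the memory definition in vmctx (assumed)
        heap_access_oob, unaligned_atomic,
    )
}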

source

fn i32_atomic_cmpxchg_8u( &mut self, new: Location<Self::GPR, Self::SIMD>, cmp: Location<Self::GPR, Self::SIMD>, target: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 atomic Compare and Exchange with u8

source

fn i32_atomic_cmpxchg_16u( &mut self, new: Location<Self::GPR, Self::SIMD>, cmp: Location<Self::GPR, Self::SIMD>, target: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i32 atomic Compare and Exchange with u16

source

fn emit_call_with_reloc( &mut self, calling_convention: CallingConvention, reloc_target: RelocationTarget, ) -> Result<Vec<Relocation>, CompileError>

Emit a move of the function address into a GPR ready for the call, using the appropriate relocation

source

fn emit_binop_add64( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Add with location directly from the stack

source

fn emit_binop_sub64( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Sub with location directly from the stack

source

fn emit_binop_mul64( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Multiply with location directly from the stack

source

fn emit_binop_udiv64( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, integer_division_by_zero: DynamicLabel, integer_overflow: DynamicLabel, ) -> Result<usize, CompileError>

Unsigned Division with locations directly from the stack. Returns the offset of the DIV opcode, to mark as trappable.

source

fn emit_binop_sdiv64( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, integer_division_by_zero: DynamicLabel, integer_overflow: DynamicLabel, ) -> Result<usize, CompileError>

Signed Division with locations directly from the stack. Returns the offset of the DIV opcode, to mark as trappable.

source

fn emit_binop_urem64( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, integer_division_by_zero: DynamicLabel, integer_overflow: DynamicLabel, ) -> Result<usize, CompileError>

Unsigned Remainder (of a division) with locations directly from the stack. Returns the offset of the DIV opcode, to mark as trappable.

source

fn emit_binop_srem64( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, integer_division_by_zero: DynamicLabel, integer_overflow: DynamicLabel, ) -> Result<usize, CompileError>

Signed Remainder (of a division) with locations directly from the stack. Returns the offset of the DIV opcode, to mark as trappable.

source

fn emit_binop_and64( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

And with location directly from the stack

source

fn emit_binop_or64( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Or with location directly from the stack

source

fn emit_binop_xor64( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Xor with location directly from the stack

source

fn i64_cmp_ge_s( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Signed Greater or Equal Compare 2 i64, result in a GPR

source

fn i64_cmp_gt_s( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Signed Greater Than Compare 2 i64, result in a GPR

source

fn i64_cmp_le_s( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Signed Less or Equal Compare 2 i64, result in a GPR

source

fn i64_cmp_lt_s( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Signed Less Than Compare 2 i64, result in a GPR

source

fn i64_cmp_ge_u( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Unsigned Greater or Equal Compare 2 i64, result in a GPR

source

fn i64_cmp_gt_u( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Unsigned Greater Than Compare 2 i64, result in a GPR

source

fn i64_cmp_le_u( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Unsigned Less or Equal Compare 2 i64, result in a GPR

source

fn i64_cmp_lt_u( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Unsigned Less Than Compare 2 i64, result in a GPR

source

fn i64_cmp_ne( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Not Equal Compare 2 i64, result in a GPR

source

fn i64_cmp_eq( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Equal Compare 2 i64, result in a GPR

source

fn i64_clz( &mut self, loc: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Count the leading 0 bits of an i64

source

fn i64_ctz( &mut self, loc: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Count the trailing 0 bits of an i64

source

fn i64_popcnt( &mut self, loc: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Count the number of 1 bits of an i64

source

fn i64_shl( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

i64 Logical Shift Left

source

fn i64_shr( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

i64 Logical Shift Right

source

fn i64_sar( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

i64 Arithmetic Shift Right

source

fn i64_rol( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

i64 Rotate Left

source

fn i64_ror( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

i64 Rotate Right

source

fn i64_load( &mut self, addr: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 load

source

fn i64_load_8u( &mut self, addr: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 load of an unsigned 8bits

source

fn i64_load_8s( &mut self, addr: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 load of a signed 8-bit value

source

fn i64_load_32u( &mut self, addr: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 load of an unsigned 32bits

source

fn i64_load_32s( &mut self, addr: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 load of a signed 32-bit value

source

fn i64_load_16u( &mut self, addr: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 load of an unsigned 16-bit value

source

fn i64_load_16s( &mut self, addr: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 load of a signed 16-bit value

source

fn i64_atomic_load( &mut self, addr: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic load

source

fn i64_atomic_load_8u( &mut self, addr: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic load from unsigned 8bits

source

fn i64_atomic_load_16u( &mut self, addr: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic load from unsigned 16bits

source

fn i64_atomic_load_32u( &mut self, addr: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic load from unsigned 32bits

source

fn i64_save( &mut self, value: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, addr: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 save

source

fn i64_save_8( &mut self, value: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, addr: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 save of the lower 8bits

source

fn i64_save_16( &mut self, value: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, addr: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 save of the lower 16bits

source

fn i64_save_32( &mut self, value: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, addr: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 save of the lower 32bits

source

fn i64_atomic_save( &mut self, value: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, addr: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic save

source

fn i64_atomic_save_8( &mut self, value: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, addr: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic save of the lower 8 bits

source

fn i64_atomic_save_16( &mut self, value: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, addr: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic save of the lower 16 bits

source

fn i64_atomic_save_32( &mut self, value: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, addr: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic save of the lower 32 bits

source

fn i64_atomic_add( &mut self, loc: Location<Self::GPR, Self::SIMD>, target: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic Add with i64

source

fn i64_atomic_add_8u( &mut self, loc: Location<Self::GPR, Self::SIMD>, target: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic Add with unsigned 8bits

source

fn i64_atomic_add_16u( &mut self, loc: Location<Self::GPR, Self::SIMD>, target: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic Add with unsigned 16bits

source

fn i64_atomic_add_32u( &mut self, loc: Location<Self::GPR, Self::SIMD>, target: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic Add with unsigned 32bits

source

fn i64_atomic_sub( &mut self, loc: Location<Self::GPR, Self::SIMD>, target: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic Sub with i64

source

fn i64_atomic_sub_8u( &mut self, loc: Location<Self::GPR, Self::SIMD>, target: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic Sub with unsigned 8bits

source

fn i64_atomic_sub_16u( &mut self, loc: Location<Self::GPR, Self::SIMD>, target: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic Sub with unsigned 16bits

source

fn i64_atomic_sub_32u( &mut self, loc: Location<Self::GPR, Self::SIMD>, target: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic Sub with unsigned 32bits

source

fn i64_atomic_and( &mut self, loc: Location<Self::GPR, Self::SIMD>, target: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic And with i64

source

fn i64_atomic_and_8u( &mut self, loc: Location<Self::GPR, Self::SIMD>, target: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic And with unsigned 8bits

source

fn i64_atomic_and_16u( &mut self, loc: Location<Self::GPR, Self::SIMD>, target: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic And with unsigned 16bits

source

fn i64_atomic_and_32u( &mut self, loc: Location<Self::GPR, Self::SIMD>, target: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic And with unsigned 32bits

source

fn i64_atomic_or( &mut self, loc: Location<Self::GPR, Self::SIMD>, target: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic Or with i64

source

fn i64_atomic_or_8u( &mut self, loc: Location<Self::GPR, Self::SIMD>, target: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic Or with unsigned 8bits

source

fn i64_atomic_or_16u( &mut self, loc: Location<Self::GPR, Self::SIMD>, target: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic Or with unsigned 16bits

source

fn i64_atomic_or_32u( &mut self, loc: Location<Self::GPR, Self::SIMD>, target: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic Or with unsigned 32bits

source

fn i64_atomic_xor( &mut self, loc: Location<Self::GPR, Self::SIMD>, target: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic Xor with i64

source

fn i64_atomic_xor_8u( &mut self, loc: Location<Self::GPR, Self::SIMD>, target: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic Xor with unsigned 8bits

source

fn i64_atomic_xor_16u( &mut self, loc: Location<Self::GPR, Self::SIMD>, target: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic Xor with unsigned 16bits

source

fn i64_atomic_xor_32u( &mut self, loc: Location<Self::GPR, Self::SIMD>, target: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic Xor with unsigned 32bits

source

fn i64_atomic_xchg( &mut self, loc: Location<Self::GPR, Self::SIMD>, target: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic Exchange with i64

source

fn i64_atomic_xchg_8u( &mut self, loc: Location<Self::GPR, Self::SIMD>, target: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic Exchange with u8

source

fn i64_atomic_xchg_16u( &mut self, loc: Location<Self::GPR, Self::SIMD>, target: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic Exchange with u16

source

fn i64_atomic_xchg_32u( &mut self, loc: Location<Self::GPR, Self::SIMD>, target: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic Exchange with u32

source

fn i64_atomic_cmpxchg( &mut self, new: Location<Self::GPR, Self::SIMD>, cmp: Location<Self::GPR, Self::SIMD>, target: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic Compare and Exchange with i64

source

fn i64_atomic_cmpxchg_8u( &mut self, new: Location<Self::GPR, Self::SIMD>, cmp: Location<Self::GPR, Self::SIMD>, target: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic Compare and Exchange with u8

source

fn i64_atomic_cmpxchg_16u( &mut self, new: Location<Self::GPR, Self::SIMD>, cmp: Location<Self::GPR, Self::SIMD>, target: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic Compare and Exchange with u16

source

fn i64_atomic_cmpxchg_32u( &mut self, new: Location<Self::GPR, Self::SIMD>, cmp: Location<Self::GPR, Self::SIMD>, target: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

i64 atomic Compare and Exchange with u32

source

fn f32_load( &mut self, addr: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

load an F32

source

fn f32_save( &mut self, value: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, addr: Location<Self::GPR, Self::SIMD>, canonicalize: bool, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

f32 save

source

fn f64_load( &mut self, addr: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, ret: Location<Self::GPR, Self::SIMD>, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

load an F64

source

fn f64_save( &mut self, value: Location<Self::GPR, Self::SIMD>, memarg: &MemArg, addr: Location<Self::GPR, Self::SIMD>, canonicalize: bool, need_check: bool, imported_memories: bool, offset: i32, heap_access_oob: DynamicLabel, unaligned_atomic: DynamicLabel, ) -> Result<(), CompileError>

f64 save

source

fn convert_f64_i64( &mut self, loc: Location<Self::GPR, Self::SIMD>, signed: bool, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Convert a F64 from I64, signed or unsigned

source

fn convert_f64_i32( &mut self, loc: Location<Self::GPR, Self::SIMD>, signed: bool, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Convert a F64 from I32, signed or unsigned

source

fn convert_f32_i64( &mut self, loc: Location<Self::GPR, Self::SIMD>, signed: bool, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Convert a F32 from I64, signed or unsigned

source

fn convert_f32_i32( &mut self, loc: Location<Self::GPR, Self::SIMD>, signed: bool, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Convert a F32 from I32, signed or unsigned

source

fn convert_i64_f64( &mut self, loc: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, signed: bool, sat: bool, ) -> Result<(), CompileError>

Convert an F64 to I64, signed or unsigned, with or without saturation
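
A small sketch of how one helper covers both wasm i64.trunc_f64_s and i64.trunc_sat_f64_s; the locations come from the surrounding code generator.

fn lower_i64_trunc_f64<M: Machine>(
    m: &mut M,
    loc: Location<M::GPR, M::SIMD>,
    ret: Location<M::GPR, M::SIMD>,
    saturating: bool,
) -> Result<(), CompileError> {
    // signed = true; sat = false traps on NaN/overflow, sat = true clamps instead.
    m.convert_i64_f64(loc, ret, true, saturating)
}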

source

fn convert_i32_f64( &mut self, loc: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, signed: bool, sat: bool, ) -> Result<(), CompileError>

Convert an F64 to I32, signed or unsigned, with or without saturation

source

fn convert_i64_f32( &mut self, loc: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, signed: bool, sat: bool, ) -> Result<(), CompileError>

Convert an F32 to I64, signed or unsigned, with or without saturation

source

fn convert_i32_f32( &mut self, loc: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, signed: bool, sat: bool, ) -> Result<(), CompileError>

Convert an F32 to I32, signed or unsigned, with or without saturation

source

fn convert_f64_f32( &mut self, loc: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Convert an F32 to an F64

source

fn convert_f32_f64( &mut self, loc: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Convert an F64 to an F32

source

fn f64_neg( &mut self, loc: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Negate an F64

source

fn f64_abs( &mut self, loc: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Get the Absolute Value of an F64

source

fn emit_i64_copysign( &mut self, tmp1: Self::GPR, tmp2: Self::GPR, ) -> Result<(), CompileError>

Copy the sign from tmp1 (Self::GPR) to tmp2 (Self::GPR)
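
At the bit level, copysign reduces to masking once both F64 values sit in general-purpose registers as raw bits. A small illustrative helper (which of tmp1/tmp2 plays which role is left to the implementation; the names below are hypothetical):

// Keep the magnitude bits of one operand and the sign bit of the other.
fn copysign_bits_f64(magnitude_bits: u64, sign_bits: u64) -> u64 {
    const SIGN_MASK: u64 = 0x8000_0000_0000_0000;
    (magnitude_bits & !SIGN_MASK) | (sign_bits & SIGN_MASK)
}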

source

fn f64_sqrt( &mut self, loc: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Get the Square Root of an F64

source

fn f64_trunc( &mut self, loc: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Trunc of an F64

source

fn f64_ceil( &mut self, loc: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Ceil of an F64

source

fn f64_floor( &mut self, loc: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Floor of an F64

source

fn f64_nearest( &mut self, loc: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Round to nearest int of an F64
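
nearest rounds to the closest integral value with ties going to the even integer, which is not what f64::round does. A one-line model in recent Rust (round_ties_even has been stable since Rust 1.77):

// 2.5 -> 2.0, 3.5 -> 4.0, -0.5 -> -0.0 (ties go to the even integer).
fn f64_nearest_model(x: f64) -> f64 {
    x.round_ties_even()
}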

source

fn f64_cmp_ge( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Greater or Equal Compare 2 F64, result in a GPR

source

fn f64_cmp_gt( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Greater Than Compare 2 F64, result in a GPR

source

fn f64_cmp_le( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Less or Equal Compare 2 F64, result in a GPR

source

fn f64_cmp_lt( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Less Than Compare 2 F64, result in a GPR

source

fn f64_cmp_ne( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Not Equal Compare 2 F64, result in a GPR

source

fn f64_cmp_eq( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Equal Compare 2 F64, result in a GPR
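
These comparison methods (and their F32 counterparts below) leave an i32 of 0 or 1 in the result location. The NaN behaviour the generated code must respect: every ordered comparison with a NaN operand yields 0, while ne yields 1. A plain-Rust model, for orientation only:

fn f64_cmp_eq_model(a: f64, b: f64) -> i32 {
    (a == b) as i32 // 0 whenever either operand is NaN
}

fn f64_cmp_ne_model(a: f64, b: f64) -> i32 {
    (a != b) as i32 // 1 whenever either operand is NaN
}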

source

fn f64_min( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Get the Min of 2 F64 values

source

fn f64_max( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Get the Max of 2 F64 values
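
WebAssembly's float min/max differ from Rust's f64::min / f64::max: a NaN operand propagates as NaN, and -0.0 is ordered below +0.0. A hedged sketch of the min semantics these methods must produce (max is symmetric; this is a model, not the emitted code):

fn wasm_f64_min_model(a: f64, b: f64) -> f64 {
    if a.is_nan() || b.is_nan() {
        f64::NAN // NaN propagates instead of being ignored
    } else if a == b {
        // Covers +0.0 vs -0.0: min prefers the negatively signed zero.
        if a.is_sign_negative() { a } else { b }
    } else if a < b {
        a
    } else {
        b
    }
}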

source

fn f64_add( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Add 2 F64 values

source

fn f64_sub( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Sub 2 F64 values

source

fn f64_mul( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Multiply 2 F64 values

source

fn f64_div( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Divide 2 F64 values

source

fn f32_neg( &mut self, loc: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Negate an F32

source

fn f32_abs( &mut self, loc: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Get the Absolute Value of an F32

source

fn emit_i32_copysign( &mut self, tmp1: Self::GPR, tmp2: Self::GPR, ) -> Result<(), CompileError>

Copy the sign from tmp1 (Self::GPR) to tmp2 (Self::GPR)

source

fn f32_sqrt( &mut self, loc: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Get the Square Root of an F32

source

fn f32_trunc( &mut self, loc: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Trunc of an F32

source

fn f32_ceil( &mut self, loc: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Ceil of an F32

source

fn f32_floor( &mut self, loc: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Floor of an F32

source

fn f32_nearest( &mut self, loc: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Round at nearest int of an F32

source

fn f32_cmp_ge( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Greater or Equal Compare 2 F32, result in a GPR

source

fn f32_cmp_gt( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Greater Than Compare 2 F32, result in a GPR

source

fn f32_cmp_le( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Less or Equal Compare 2 F32, result in a GPR

source

fn f32_cmp_lt( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Less Than Compare 2 F32, result in a GPR

source

fn f32_cmp_ne( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Not Equal Compare 2 F32, result in a GPR

source

fn f32_cmp_eq( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Equal Compare 2 F32, result in a GPR

source

fn f32_min( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Get the Min of 2 F32 values

source

fn f32_max( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Get the Max of 2 F32 values

source

fn f32_add( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Add 2 F32 values

source

fn f32_sub( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Sub 2 F32 values

source

fn f32_mul( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Multiply 2 F32 values

source

fn f32_div( &mut self, loc_a: Location<Self::GPR, Self::SIMD>, loc_b: Location<Self::GPR, Self::SIMD>, ret: Location<Self::GPR, Self::SIMD>, ) -> Result<(), CompileError>

Divide 2 F32 values

source

fn gen_std_trampoline( &self, sig: &FunctionType, calling_convention: CallingConvention, ) -> Result<FunctionBody, CompileError>

Standard function trampoline generation

source

fn gen_std_dynamic_import_trampoline( &self, vmoffsets: &VMOffsets, sig: &FunctionType, calling_convention: CallingConvention, ) -> Result<FunctionBody, CompileError>

Generates a dynamic import function call trampoline for a function type.

source

fn gen_import_call_trampoline( &self, vmoffsets: &VMOffsets, index: FunctionIndex, sig: &FunctionType, calling_convention: CallingConvention, ) -> Result<CustomSection, CompileError>

Singlepass calls import functions through a trampoline.

source

fn gen_dwarf_unwind_info( &mut self, code_len: usize, ) -> Option<UnwindInstructions>

Generate eh_frame unwind instructions (or None if not possible or not supported)

source

fn gen_windows_unwind_info(&mut self, code_len: usize) -> Option<Vec<u8>>

Generate Windows unwind instructions (or None if not possible or not supported)

Implementors§