wasmer_compiler_singlepass/
machine.rs

1use crate::{
2    common_decl::*,
3    location::{Location, Reg},
4    machine_arm64::MachineARM64,
5    machine_riscv::MachineRiscv,
6    machine_x64::MachineX86_64,
7    unwind::UnwindInstructions,
8};
9
10use dynasmrt::{AssemblyOffset, DynamicLabel};
11use std::{
12    collections::{BTreeMap, HashMap},
13    fmt::Debug,
14};
15use wasmer_compiler::{
16    types::{
17        address_map::InstructionAddressMap,
18        function::FunctionBody,
19        relocation::{Relocation, RelocationTarget},
20        section::CustomSection,
21    },
22    wasmparser::MemArg,
23};
24use wasmer_types::{
25    CompileError, FunctionIndex, FunctionType, TrapCode, TrapInformation, VMOffsets,
26    target::{Architecture, CallingConvention, Target},
27};
/// A dynamic assembler label, used as the target of jumps and calls
/// (see `Machine::get_label` / `Machine::emit_label`).
pub type Label = DynamicLabel;
/// A byte offset into the emitted machine code.
pub type Offset = AssemblyOffset;
30
/// An immediate constant value, tagged by its WebAssembly-level type.
///
/// `PartialEq` only (no `Eq`): the float variants inherit IEEE-754
/// comparison, so `F32(NaN) != F32(NaN)`.
/// All payloads are `Copy`, so the enum derives `Copy` as well; `Debug`
/// is derived so the type can appear in diagnostics and assertions.
#[allow(dead_code)]
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum Value {
    /// 8-bit signed integer immediate.
    I8(i8),
    /// 32-bit integer immediate.
    I32(i32),
    /// 64-bit integer immediate.
    I64(i64),
    /// 32-bit float immediate.
    F32(f32),
    /// 64-bit float immediate.
    F64(f64),
}
40
/// Early-returns from the enclosing function with a
/// `CompileError::Codegen` whose message is built with `format!`-style
/// arguments. The enclosing function must return
/// `Result<_, CompileError>` and have `CompileError` in scope.
#[macro_export]
macro_rules! codegen_error {
    ($($arg:tt)*) => {return Err(CompileError::Codegen(format!($($arg)*)))}
}
45
/// An operand that may encode a compile-time immediate constant.
#[allow(unused)]
pub trait MaybeImmediate {
    /// The typed immediate `Value` this operand encodes, or `None` if it
    /// is not an immediate.
    fn imm_value(&self) -> Option<Value>;
    /// Whether this operand is an immediate (i.e. `imm_value()` is `Some`).
    fn is_imm(&self) -> bool {
        self.imm_value().is_some()
    }
    /// The immediate as a raw scalar `i64`
    /// (presumably the value's bit-level/widened integer form —
    /// NOTE(review): confirm against the implementors).
    fn imm_value_scalar(&self) -> Option<i64>;
}
54
/// A trap table for a `RunnableModuleInfo`.
///
/// Built up during code emission and used to resolve a faulting
/// instruction pointer back to the reason it trapped.
#[derive(Clone, Debug, Default)]
pub struct TrapTable {
    /// Mappings from offsets in generated machine code to the corresponding trap code.
    /// (Presumably byte offsets into the finalized function body —
    /// NOTE(review): confirm against `mark_address_with_trap_code` implementors.)
    pub offset_to_code: BTreeMap<usize, TrapCode>,
}
61
/// Native page size in bytes. All supported architectures appear to use
/// 4096-byte pages, so this is not specialized per architecture for now.
pub const NATIVE_PAGE_SIZE: usize = 4096;
64
/// Condition codes for unsigned integer comparisons, consumed by
/// conditional branches (see `Machine::jmp_on_condition`).
#[allow(dead_code)]
pub enum UnsignedCondition {
    /// a == b
    Equal,
    /// a != b
    NotEqual,
    /// a > b (unsigned)
    Above,
    /// a >= b (unsigned)
    AboveEqual,
    /// a < b (unsigned)
    Below,
    /// a <= b (unsigned)
    BelowEqual,
}
74
/// Human-readable markers attached to regions of emitted assembly
/// (keyed by code offset in `FinalizedAssembly::assembly_comments`),
/// rendered through the `Display` impl when dumping code.
///
/// Fieldless, so `Copy`/`PartialEq`/`Eq` are free and make the type
/// convenient to pass around and compare.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum AssemblyComment {
    /// Start of the native function prologue.
    FunctionPrologue,
    /// Code that zero-initializes the function's locals.
    InitializeLocals,
    /// The emitted trap-handlers table.
    TrapHandlersTable,
    /// The stack red zone.
    RedZone,
    /// Start of the translated function body.
    FunctionBody,
}
83
84impl std::fmt::Display for AssemblyComment {
85    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
86        match self {
87            AssemblyComment::FunctionPrologue => write!(f, "function prologue"),
88            AssemblyComment::InitializeLocals => write!(f, "initialize locals"),
89            AssemblyComment::TrapHandlersTable => write!(f, "trap handlers table"),
90            AssemblyComment::RedZone => write!(f, "red zone"),
91            AssemblyComment::FunctionBody => write!(f, "body"),
92        }
93    }
94}
95
/// Result of `Machine::assembler_finalize`: the finished machine code
/// together with the comments collected during emission.
pub(crate) struct FinalizedAssembly {
    /// The raw bytes of the finalized machine code.
    pub(crate) body: Vec<u8>,
    /// Annotations keyed by offset (presumably byte offsets into `body` —
    /// NOTE(review): confirm against the disassembly/dump consumer).
    pub(crate) assembly_comments: HashMap<usize, AssemblyComment>,
}
100
101#[allow(unused)]
102pub trait Machine {
103    type GPR: Copy + Eq + Debug + Reg;
104    type SIMD: Copy + Eq + Debug + Reg;
105    /// Get current assembler offset
106    fn assembler_get_offset(&self) -> Offset;
107    /// Get the GPR that hold vmctx
108    fn get_vmctx_reg(&self) -> Self::GPR;
109    /// Picks an unused general purpose register for local/stack/argument use.
110    ///
111    /// This method does not mark the register as used
112    fn pick_gpr(&self) -> Option<Self::GPR>;
113    /// Picks an unused general purpose register for internal temporary use.
114    ///
115    /// This method does not mark the register as used
116    fn pick_temp_gpr(&self) -> Option<Self::GPR>;
117    /// Get all used GPR
118    fn get_used_gprs(&self) -> Vec<Self::GPR>;
119    /// Get all used SIMD regs
120    fn get_used_simd(&self) -> Vec<Self::SIMD>;
121    /// Picks an unused general pupose register and mark it as used
122    fn acquire_temp_gpr(&mut self) -> Option<Self::GPR>;
123    /// Releases a temporary GPR.
124    fn release_gpr(&mut self, gpr: Self::GPR);
125    /// Specify that a given register is in use.
126    fn reserve_unused_temp_gpr(&mut self, gpr: Self::GPR) -> Self::GPR;
127    /// reserve a GPR
128    fn reserve_gpr(&mut self, gpr: Self::GPR);
129    /// Push used gpr to the stack. Return the bytes taken on the stack.
130    fn push_used_gpr(&mut self, gprs: &[Self::GPR]) -> Result<usize, CompileError>;
131    /// Pop used gpr from the stack.
132    fn pop_used_gpr(&mut self, gprs: &[Self::GPR]) -> Result<(), CompileError>;
133    /// Picks an unused SIMD register.
134    ///
135    /// This method does not mark the register as used
136    fn pick_simd(&self) -> Option<Self::SIMD>;
137    /// Picks an unused SIMD register for internal temporary use.
138    ///
139    /// This method does not mark the register as used
140    fn pick_temp_simd(&self) -> Option<Self::SIMD>;
141    /// Acquires a temporary XMM register.
142    fn acquire_temp_simd(&mut self) -> Option<Self::SIMD>;
143    /// reserve a SIMD register
144    fn reserve_simd(&mut self, simd: Self::SIMD);
145    /// Releases a temporary XMM register.
146    fn release_simd(&mut self, simd: Self::SIMD);
147    /// Push used simd regs to the stack. Return bytes taken on the stack
148    fn push_used_simd(&mut self, simds: &[Self::SIMD]) -> Result<usize, CompileError>;
149    /// Pop used simd regs to the stack
150    fn pop_used_simd(&mut self, simds: &[Self::SIMD]) -> Result<(), CompileError>;
151    /// Return a rounded stack adjustement value (must be multiple of 16bytes on ARM64 for example)
152    fn round_stack_adjust(&self, value: usize) -> usize;
153    /// Set the source location of the Wasm to the given offset.
154    fn set_srcloc(&mut self, offset: u32);
155    /// Marks each address in the code range emitted by `f` with the trap code `code`.
156    fn mark_address_range_with_trap_code(&mut self, code: TrapCode, begin: usize, end: usize);
157    /// Marks one address as trappable with trap code `code`.
158    fn mark_address_with_trap_code(&mut self, code: TrapCode);
159    /// Marks the instruction as trappable with trap code `code`. return "begin" offset
160    fn mark_instruction_with_trap_code(&mut self, code: TrapCode) -> usize;
161    /// Pushes the instruction to the address map, calculating the offset from a
162    /// provided beginning address.
163    fn mark_instruction_address_end(&mut self, begin: usize);
164    /// Insert a StackOverflow (at offset 0)
165    fn insert_stackoverflow(&mut self);
166    /// Get all current TrapInformation
167    fn collect_trap_information(&self) -> Vec<TrapInformation>;
168    // Get all intructions address map
169    fn instructions_address_map(&self) -> Vec<InstructionAddressMap>;
170    /// Memory location for a local on the stack
171    /// Like Location::Memory(GPR::RBP, -(self.stack_offset.0 as i32)) for x86_64
172    fn local_on_stack(&mut self, stack_offset: i32) -> Location<Self::GPR, Self::SIMD>;
173    /// Allocate an extra space on the stack.
174    fn extend_stack(&mut self, delta_stack_offset: u32) -> Result<(), CompileError>;
175    /// Truncate stack space by the `delta_stack_offset`.
176    fn truncate_stack(&mut self, delta_stack_offset: u32) -> Result<(), CompileError>;
177    /// Zero a location taht is 32bits
178    fn zero_location(
179        &mut self,
180        size: Size,
181        location: Location<Self::GPR, Self::SIMD>,
182    ) -> Result<(), CompileError>;
183    /// GPR Reg used for local pointer on the stack
184    fn local_pointer(&self) -> Self::GPR;
185    /// push a value on the stack for a native call
186    fn move_location_for_native(
187        &mut self,
188        size: Size,
189        loc: Location<Self::GPR, Self::SIMD>,
190        dest: Location<Self::GPR, Self::SIMD>,
191    ) -> Result<(), CompileError>;
192    /// Determine whether a local should be allocated on the stack.
193    fn is_local_on_stack(&self, idx: usize) -> bool;
194    /// Determine a local's location.
195    fn get_local_location(
196        &self,
197        idx: usize,
198        callee_saved_regs_size: usize,
199    ) -> Location<Self::GPR, Self::SIMD>;
200    /// Move a local to the stack
201    /// Like emit_mov(Size::S64, location, Location::Memory(GPR::RBP, -(self.stack_offset.0 as i32)));
202    fn move_local(
203        &mut self,
204        stack_offset: i32,
205        location: Location<Self::GPR, Self::SIMD>,
206    ) -> Result<(), CompileError>;
207    /// List of register to save, depending on the CallingConvention
208    fn list_to_save(
209        &self,
210        calling_convention: CallingConvention,
211    ) -> Vec<Location<Self::GPR, Self::SIMD>>;
212    /// Get registers for first N function call parameters.
213    fn get_param_registers(&self, calling_convention: CallingConvention) -> &'static [Self::GPR];
214    /// Get param location (to build a call, using SP for stack args)
215    fn get_param_location(
216        &self,
217        idx: usize,
218        sz: Size,
219        stack_offset: &mut usize,
220        calling_convention: CallingConvention,
221    ) -> Location<Self::GPR, Self::SIMD>;
222    /// Get call param location (from a call, using FP for stack args)
223    fn get_call_param_location(
224        &self,
225        result_slots: usize,
226        idx: usize,
227        sz: Size,
228        stack_offset: &mut usize,
229        calling_convention: CallingConvention,
230    ) -> Location<Self::GPR, Self::SIMD>;
231    /// Get param location (idx must point to an argument that is passed in a GPR).
232    fn get_simple_param_location(
233        &self,
234        idx: usize,
235        calling_convention: CallingConvention,
236    ) -> Self::GPR;
237    /// Get return value location (to build a call, using SP for stack return values).
238    fn get_return_value_location(
239        &self,
240        idx: usize,
241        stack_location: &mut usize,
242        calling_convention: CallingConvention,
243    ) -> Location<Self::GPR, Self::SIMD>;
244    /// Get return value location (from a call, using FP for stack return values).
245    fn get_call_return_value_location(
246        &self,
247        idx: usize,
248        calling_convention: CallingConvention,
249    ) -> Location<Self::GPR, Self::SIMD>;
250    /// move a location to another
251    fn move_location(
252        &mut self,
253        size: Size,
254        source: Location<Self::GPR, Self::SIMD>,
255        dest: Location<Self::GPR, Self::SIMD>,
256    ) -> Result<(), CompileError>;
257    /// move a location to another, with zero or sign extension
258    fn move_location_extend(
259        &mut self,
260        size_val: Size,
261        signed: bool,
262        source: Location<Self::GPR, Self::SIMD>,
263        size_op: Size,
264        dest: Location<Self::GPR, Self::SIMD>,
265    ) -> Result<(), CompileError>;
266    /// Init the stack loc counter
267    fn init_stack_loc(
268        &mut self,
269        init_stack_loc_cnt: u64,
270        last_stack_loc: Location<Self::GPR, Self::SIMD>,
271    ) -> Result<(), CompileError>;
272    /// Restore save_area
273    fn restore_saved_area(&mut self, saved_area_offset: i32) -> Result<(), CompileError>;
274    /// Pop a location
275    fn pop_location(
276        &mut self,
277        location: Location<Self::GPR, Self::SIMD>,
278    ) -> Result<(), CompileError>;
279
280    /// Finalize the assembler
281    fn assembler_finalize(
282        self,
283        assembly_comments: HashMap<usize, AssemblyComment>,
284    ) -> Result<FinalizedAssembly, CompileError>;
285
286    /// get_offset of Assembler
287    fn get_offset(&self) -> Offset;
288
289    /// finalize a function
290    fn finalize_function(&mut self) -> Result<(), CompileError>;
291
292    /// emit native function prolog (depending on the calling Convention, like "PUSH RBP / MOV RSP, RBP")
293    fn emit_function_prolog(&mut self) -> Result<(), CompileError>;
294    /// emit native function epilog (depending on the calling Convention, like "MOV RBP, RSP / POP RBP")
295    fn emit_function_epilog(&mut self) -> Result<(), CompileError>;
296    /// Handle copy to SIMD register from ret value (if needed by the arch/calling convention)
297    fn emit_function_return_float(&mut self) -> Result<(), CompileError>;
298    /// Cannonicalize a NaN (or panic if not supported)
299    fn canonicalize_nan(
300        &mut self,
301        sz: Size,
302        input: Location<Self::GPR, Self::SIMD>,
303        output: Location<Self::GPR, Self::SIMD>,
304    ) -> Result<(), CompileError>;
305
306    /// emit an Illegal Opcode, associated with a trapcode
307    fn emit_illegal_op(&mut self, trp: TrapCode) -> Result<(), CompileError>;
308    /// create a new label
309    fn get_label(&mut self) -> Label;
310    /// emit a label
311    fn emit_label(&mut self, label: Label) -> Result<(), CompileError>;
312
313    /// get the gpr used for call. like RAX on x86_64
314    fn get_gpr_for_call(&self) -> Self::GPR;
315    /// Emit a call using the value in register
316    fn emit_call_register(&mut self, register: Self::GPR) -> Result<(), CompileError>;
317    /// Emit a call to a label
318    fn emit_call_label(&mut self, label: Label) -> Result<(), CompileError>;
319    /// indirect call with trampoline
320    fn arch_emit_indirect_call_with_trampoline(
321        &mut self,
322        location: Location<Self::GPR, Self::SIMD>,
323    ) -> Result<(), CompileError>;
324    /// emit a call to a location
325    fn emit_call_location(
326        &mut self,
327        location: Location<Self::GPR, Self::SIMD>,
328    ) -> Result<(), CompileError>;
329
330    /// Emit a debug breakpoint
331    fn emit_debug_breakpoint(&mut self) -> Result<(), CompileError>;
332
333    /// Add src+dst -> dst (with or without flags)
334    fn location_add(
335        &mut self,
336        size: Size,
337        source: Location<Self::GPR, Self::SIMD>,
338        dest: Location<Self::GPR, Self::SIMD>,
339        flags: bool,
340    ) -> Result<(), CompileError>;
341
342    /// Cmp src - dst and set flags
343    fn location_cmp(
344        &mut self,
345        size: Size,
346        source: Location<Self::GPR, Self::SIMD>,
347        dest: Location<Self::GPR, Self::SIMD>,
348    ) -> Result<(), CompileError>;
349
350    /// jmp without condidtion
351    fn jmp_unconditional(&mut self, label: Label) -> Result<(), CompileError>;
352
353    /// jmp to label if the provided condition is true (when comparing loc_a and loc_b)
354    fn jmp_on_condition(
355        &mut self,
356        cond: UnsignedCondition,
357        size: Size,
358        loc_a: Location<Self::GPR, Self::SIMD>,
359        loc_b: Location<Self::GPR, Self::SIMD>,
360        label: Label,
361    ) -> Result<(), CompileError>;
362
363    /// jmp using a jump table at lable with cond as the indice
364    fn emit_jmp_to_jumptable(
365        &mut self,
366        label: Label,
367        cond: Location<Self::GPR, Self::SIMD>,
368    ) -> Result<(), CompileError>;
369
370    /// Align for Loop (may do nothing, depending on the arch)
371    fn align_for_loop(&mut self) -> Result<(), CompileError>;
372
373    /// ret (from a Call)
374    fn emit_ret(&mut self) -> Result<(), CompileError>;
375
376    /// Stack push of a location
377    fn emit_push(
378        &mut self,
379        size: Size,
380        loc: Location<Self::GPR, Self::SIMD>,
381    ) -> Result<(), CompileError>;
382    /// Stack pop of a location
383    fn emit_pop(
384        &mut self,
385        size: Size,
386        loc: Location<Self::GPR, Self::SIMD>,
387    ) -> Result<(), CompileError>;
388    /// relaxed mov: move from anywhere to anywhere
389    fn emit_relaxed_mov(
390        &mut self,
391        sz: Size,
392        src: Location<Self::GPR, Self::SIMD>,
393        dst: Location<Self::GPR, Self::SIMD>,
394    ) -> Result<(), CompileError>;
395    /// relaxed cmp: compare from anywhere and anywhere
396    fn emit_relaxed_cmp(
397        &mut self,
398        sz: Size,
399        src: Location<Self::GPR, Self::SIMD>,
400        dst: Location<Self::GPR, Self::SIMD>,
401    ) -> Result<(), CompileError>;
402    /// Emit a memory fence. Can be nothing for x86_64 or a DMB on ARM64 for example
403    fn emit_memory_fence(&mut self) -> Result<(), CompileError>;
404    /// relaxed move with sign extension
405    fn emit_relaxed_sign_extension(
406        &mut self,
407        sz_src: Size,
408        src: Location<Self::GPR, Self::SIMD>,
409        sz_dst: Size,
410        dst: Location<Self::GPR, Self::SIMD>,
411    ) -> Result<(), CompileError>;
412    /// Multiply location with immediate
413    fn emit_imul_imm32(
414        &mut self,
415        size: Size,
416        imm32: u32,
417        gpr: Self::GPR,
418    ) -> Result<(), CompileError>;
419    /// Add with location directly from the stack
420    fn emit_binop_add32(
421        &mut self,
422        loc_a: Location<Self::GPR, Self::SIMD>,
423        loc_b: Location<Self::GPR, Self::SIMD>,
424        ret: Location<Self::GPR, Self::SIMD>,
425    ) -> Result<(), CompileError>;
426    /// Sub with location directly from the stack
427    fn emit_binop_sub32(
428        &mut self,
429        loc_a: Location<Self::GPR, Self::SIMD>,
430        loc_b: Location<Self::GPR, Self::SIMD>,
431        ret: Location<Self::GPR, Self::SIMD>,
432    ) -> Result<(), CompileError>;
433    /// Multiply with location directly from the stack
434    fn emit_binop_mul32(
435        &mut self,
436        loc_a: Location<Self::GPR, Self::SIMD>,
437        loc_b: Location<Self::GPR, Self::SIMD>,
438        ret: Location<Self::GPR, Self::SIMD>,
439    ) -> Result<(), CompileError>;
440    /// Unsigned Division with location directly from the stack. return the offset of the DIV opcode, to mark as trappable.
441    fn emit_binop_udiv32(
442        &mut self,
443        loc_a: Location<Self::GPR, Self::SIMD>,
444        loc_b: Location<Self::GPR, Self::SIMD>,
445        ret: Location<Self::GPR, Self::SIMD>,
446        integer_division_by_zero: Label,
447    ) -> Result<usize, CompileError>;
448    /// Signed Division with location directly from the stack. return the offset of the DIV opcode, to mark as trappable.
449    fn emit_binop_sdiv32(
450        &mut self,
451        loc_a: Location<Self::GPR, Self::SIMD>,
452        loc_b: Location<Self::GPR, Self::SIMD>,
453        ret: Location<Self::GPR, Self::SIMD>,
454        integer_division_by_zero: Label,
455        integer_overflow: Label,
456    ) -> Result<usize, CompileError>;
457    /// Unsigned Reminder (of a division) with location directly from the stack. return the offset of the DIV opcode, to mark as trappable.
458    fn emit_binop_urem32(
459        &mut self,
460        loc_a: Location<Self::GPR, Self::SIMD>,
461        loc_b: Location<Self::GPR, Self::SIMD>,
462        ret: Location<Self::GPR, Self::SIMD>,
463        integer_division_by_zero: Label,
464    ) -> Result<usize, CompileError>;
465    /// Signed Reminder (of a Division) with location directly from the stack. return the offset of the DIV opcode, to mark as trappable.
466    fn emit_binop_srem32(
467        &mut self,
468        loc_a: Location<Self::GPR, Self::SIMD>,
469        loc_b: Location<Self::GPR, Self::SIMD>,
470        ret: Location<Self::GPR, Self::SIMD>,
471        integer_division_by_zero: Label,
472    ) -> Result<usize, CompileError>;
473    /// And with location directly from the stack
474    fn emit_binop_and32(
475        &mut self,
476        loc_a: Location<Self::GPR, Self::SIMD>,
477        loc_b: Location<Self::GPR, Self::SIMD>,
478        ret: Location<Self::GPR, Self::SIMD>,
479    ) -> Result<(), CompileError>;
480    /// Or with location directly from the stack
481    fn emit_binop_or32(
482        &mut self,
483        loc_a: Location<Self::GPR, Self::SIMD>,
484        loc_b: Location<Self::GPR, Self::SIMD>,
485        ret: Location<Self::GPR, Self::SIMD>,
486    ) -> Result<(), CompileError>;
487    /// Xor with location directly from the stack
488    fn emit_binop_xor32(
489        &mut self,
490        loc_a: Location<Self::GPR, Self::SIMD>,
491        loc_b: Location<Self::GPR, Self::SIMD>,
492        ret: Location<Self::GPR, Self::SIMD>,
493    ) -> Result<(), CompileError>;
494    /// Signed Greater of Equal Compare 2 i32, result in a GPR
495    fn i32_cmp_ge_s(
496        &mut self,
497        loc_a: Location<Self::GPR, Self::SIMD>,
498        loc_b: Location<Self::GPR, Self::SIMD>,
499        ret: Location<Self::GPR, Self::SIMD>,
500    ) -> Result<(), CompileError>;
501    /// Signed Greater Than Compare 2 i32, result in a GPR
502    fn i32_cmp_gt_s(
503        &mut self,
504        loc_a: Location<Self::GPR, Self::SIMD>,
505        loc_b: Location<Self::GPR, Self::SIMD>,
506        ret: Location<Self::GPR, Self::SIMD>,
507    ) -> Result<(), CompileError>;
508    /// Signed Less of Equal Compare 2 i32, result in a GPR
509    fn i32_cmp_le_s(
510        &mut self,
511        loc_a: Location<Self::GPR, Self::SIMD>,
512        loc_b: Location<Self::GPR, Self::SIMD>,
513        ret: Location<Self::GPR, Self::SIMD>,
514    ) -> Result<(), CompileError>;
515    /// Signed Less Than Compare 2 i32, result in a GPR
516    fn i32_cmp_lt_s(
517        &mut self,
518        loc_a: Location<Self::GPR, Self::SIMD>,
519        loc_b: Location<Self::GPR, Self::SIMD>,
520        ret: Location<Self::GPR, Self::SIMD>,
521    ) -> Result<(), CompileError>;
522    /// Unsigned Greater of Equal Compare 2 i32, result in a GPR
523    fn i32_cmp_ge_u(
524        &mut self,
525        loc_a: Location<Self::GPR, Self::SIMD>,
526        loc_b: Location<Self::GPR, Self::SIMD>,
527        ret: Location<Self::GPR, Self::SIMD>,
528    ) -> Result<(), CompileError>;
529    /// Unsigned Greater Than Compare 2 i32, result in a GPR
530    fn i32_cmp_gt_u(
531        &mut self,
532        loc_a: Location<Self::GPR, Self::SIMD>,
533        loc_b: Location<Self::GPR, Self::SIMD>,
534        ret: Location<Self::GPR, Self::SIMD>,
535    ) -> Result<(), CompileError>;
536    /// Unsigned Less of Equal Compare 2 i32, result in a GPR
537    fn i32_cmp_le_u(
538        &mut self,
539        loc_a: Location<Self::GPR, Self::SIMD>,
540        loc_b: Location<Self::GPR, Self::SIMD>,
541        ret: Location<Self::GPR, Self::SIMD>,
542    ) -> Result<(), CompileError>;
543    /// Unsigned Less Than Compare 2 i32, result in a GPR
544    fn i32_cmp_lt_u(
545        &mut self,
546        loc_a: Location<Self::GPR, Self::SIMD>,
547        loc_b: Location<Self::GPR, Self::SIMD>,
548        ret: Location<Self::GPR, Self::SIMD>,
549    ) -> Result<(), CompileError>;
550    /// Not Equal Compare 2 i32, result in a GPR
551    fn i32_cmp_ne(
552        &mut self,
553        loc_a: Location<Self::GPR, Self::SIMD>,
554        loc_b: Location<Self::GPR, Self::SIMD>,
555        ret: Location<Self::GPR, Self::SIMD>,
556    ) -> Result<(), CompileError>;
557    /// Equal Compare 2 i32, result in a GPR
558    fn i32_cmp_eq(
559        &mut self,
560        loc_a: Location<Self::GPR, Self::SIMD>,
561        loc_b: Location<Self::GPR, Self::SIMD>,
562        ret: Location<Self::GPR, Self::SIMD>,
563    ) -> Result<(), CompileError>;
564    /// Count Leading 0 bit of an i32
565    fn i32_clz(
566        &mut self,
567        loc: Location<Self::GPR, Self::SIMD>,
568        ret: Location<Self::GPR, Self::SIMD>,
569    ) -> Result<(), CompileError>;
570    /// Count Trailling 0 bit of an i32
571    fn i32_ctz(
572        &mut self,
573        loc: Location<Self::GPR, Self::SIMD>,
574        ret: Location<Self::GPR, Self::SIMD>,
575    ) -> Result<(), CompileError>;
576    /// Count the number of 1 bit of an i32
577    fn i32_popcnt(
578        &mut self,
579        loc: Location<Self::GPR, Self::SIMD>,
580        ret: Location<Self::GPR, Self::SIMD>,
581    ) -> Result<(), CompileError>;
582    /// i32 Logical Shift Left
583    fn i32_shl(
584        &mut self,
585        loc_a: Location<Self::GPR, Self::SIMD>,
586        loc_b: Location<Self::GPR, Self::SIMD>,
587        ret: Location<Self::GPR, Self::SIMD>,
588    ) -> Result<(), CompileError>;
589    /// i32 Logical Shift Right
590    fn i32_shr(
591        &mut self,
592        loc_a: Location<Self::GPR, Self::SIMD>,
593        loc_b: Location<Self::GPR, Self::SIMD>,
594        ret: Location<Self::GPR, Self::SIMD>,
595    ) -> Result<(), CompileError>;
596    /// i32 Arithmetic Shift Right
597    fn i32_sar(
598        &mut self,
599        loc_a: Location<Self::GPR, Self::SIMD>,
600        loc_b: Location<Self::GPR, Self::SIMD>,
601        ret: Location<Self::GPR, Self::SIMD>,
602    ) -> Result<(), CompileError>;
603    /// i32 Roll Left
604    fn i32_rol(
605        &mut self,
606        loc_a: Location<Self::GPR, Self::SIMD>,
607        loc_b: Location<Self::GPR, Self::SIMD>,
608        ret: Location<Self::GPR, Self::SIMD>,
609    ) -> Result<(), CompileError>;
610    /// i32 Roll Right
611    fn i32_ror(
612        &mut self,
613        loc_a: Location<Self::GPR, Self::SIMD>,
614        loc_b: Location<Self::GPR, Self::SIMD>,
615        ret: Location<Self::GPR, Self::SIMD>,
616    ) -> Result<(), CompileError>;
617    /// i32 load
618    #[allow(clippy::too_many_arguments)]
619    fn i32_load(
620        &mut self,
621        addr: Location<Self::GPR, Self::SIMD>,
622        memarg: &MemArg,
623        ret: Location<Self::GPR, Self::SIMD>,
624        need_check: bool,
625        imported_memories: bool,
626        offset: i32,
627        heap_access_oob: Label,
628        unaligned_atomic: Label,
629    ) -> Result<(), CompileError>;
630    /// i32 load of an unsigned 8bits
631    #[allow(clippy::too_many_arguments)]
632    fn i32_load_8u(
633        &mut self,
634        addr: Location<Self::GPR, Self::SIMD>,
635        memarg: &MemArg,
636        ret: Location<Self::GPR, Self::SIMD>,
637        need_check: bool,
638        imported_memories: bool,
639        offset: i32,
640        heap_access_oob: Label,
641        unaligned_atomic: Label,
642    ) -> Result<(), CompileError>;
643    /// i32 load of an signed 8bits
644    #[allow(clippy::too_many_arguments)]
645    fn i32_load_8s(
646        &mut self,
647        addr: Location<Self::GPR, Self::SIMD>,
648        memarg: &MemArg,
649        ret: Location<Self::GPR, Self::SIMD>,
650        need_check: bool,
651        imported_memories: bool,
652        offset: i32,
653        heap_access_oob: Label,
654        unaligned_atomic: Label,
655    ) -> Result<(), CompileError>;
656    /// i32 load of an unsigned 16bits
657    #[allow(clippy::too_many_arguments)]
658    fn i32_load_16u(
659        &mut self,
660        addr: Location<Self::GPR, Self::SIMD>,
661        memarg: &MemArg,
662        ret: Location<Self::GPR, Self::SIMD>,
663        need_check: bool,
664        imported_memories: bool,
665        offset: i32,
666        heap_access_oob: Label,
667        unaligned_atomic: Label,
668    ) -> Result<(), CompileError>;
669    /// i32 load of an signed 16bits
670    #[allow(clippy::too_many_arguments)]
671    fn i32_load_16s(
672        &mut self,
673        addr: Location<Self::GPR, Self::SIMD>,
674        memarg: &MemArg,
675        ret: Location<Self::GPR, Self::SIMD>,
676        need_check: bool,
677        imported_memories: bool,
678        offset: i32,
679        heap_access_oob: Label,
680        unaligned_atomic: Label,
681    ) -> Result<(), CompileError>;
682    /// i32 atomic load
683    #[allow(clippy::too_many_arguments)]
684    fn i32_atomic_load(
685        &mut self,
686        addr: Location<Self::GPR, Self::SIMD>,
687        memarg: &MemArg,
688        ret: Location<Self::GPR, Self::SIMD>,
689        need_check: bool,
690        imported_memories: bool,
691        offset: i32,
692        heap_access_oob: Label,
693        unaligned_atomic: Label,
694    ) -> Result<(), CompileError>;
695    /// i32 atomic load of an unsigned 8bits
696    #[allow(clippy::too_many_arguments)]
697    fn i32_atomic_load_8u(
698        &mut self,
699        addr: Location<Self::GPR, Self::SIMD>,
700        memarg: &MemArg,
701        ret: Location<Self::GPR, Self::SIMD>,
702        need_check: bool,
703        imported_memories: bool,
704        offset: i32,
705        heap_access_oob: Label,
706        unaligned_atomic: Label,
707    ) -> Result<(), CompileError>;
708    /// i32 atomic load of an unsigned 16bits
709    #[allow(clippy::too_many_arguments)]
710    fn i32_atomic_load_16u(
711        &mut self,
712        addr: Location<Self::GPR, Self::SIMD>,
713        memarg: &MemArg,
714        ret: Location<Self::GPR, Self::SIMD>,
715        need_check: bool,
716        imported_memories: bool,
717        offset: i32,
718        heap_access_oob: Label,
719        unaligned_atomic: Label,
720    ) -> Result<(), CompileError>;
721    /// i32 save
722    #[allow(clippy::too_many_arguments)]
723    fn i32_save(
724        &mut self,
725        value: Location<Self::GPR, Self::SIMD>,
726        memarg: &MemArg,
727        addr: Location<Self::GPR, Self::SIMD>,
728        need_check: bool,
729        imported_memories: bool,
730        offset: i32,
731        heap_access_oob: Label,
732        unaligned_atomic: Label,
733    ) -> Result<(), CompileError>;
734    /// i32 save of the lower 8bits
735    #[allow(clippy::too_many_arguments)]
736    fn i32_save_8(
737        &mut self,
738        value: Location<Self::GPR, Self::SIMD>,
739        memarg: &MemArg,
740        addr: Location<Self::GPR, Self::SIMD>,
741        need_check: bool,
742        imported_memories: bool,
743        offset: i32,
744        heap_access_oob: Label,
745        unaligned_atomic: Label,
746    ) -> Result<(), CompileError>;
747    /// i32 save of the lower 16bits
748    #[allow(clippy::too_many_arguments)]
749    fn i32_save_16(
750        &mut self,
751        value: Location<Self::GPR, Self::SIMD>,
752        memarg: &MemArg,
753        addr: Location<Self::GPR, Self::SIMD>,
754        need_check: bool,
755        imported_memories: bool,
756        offset: i32,
757        heap_access_oob: Label,
758        unaligned_atomic: Label,
759    ) -> Result<(), CompileError>;
760    /// i32 atomic save
761    #[allow(clippy::too_many_arguments)]
762    fn i32_atomic_save(
763        &mut self,
764        value: Location<Self::GPR, Self::SIMD>,
765        memarg: &MemArg,
766        addr: Location<Self::GPR, Self::SIMD>,
767        need_check: bool,
768        imported_memories: bool,
769        offset: i32,
770        heap_access_oob: Label,
771        unaligned_atomic: Label,
772    ) -> Result<(), CompileError>;
773    /// i32 atomic save of a the lower 8bits
774    #[allow(clippy::too_many_arguments)]
775    fn i32_atomic_save_8(
776        &mut self,
777        value: Location<Self::GPR, Self::SIMD>,
778        memarg: &MemArg,
779        addr: Location<Self::GPR, Self::SIMD>,
780        need_check: bool,
781        imported_memories: bool,
782        offset: i32,
783        heap_access_oob: Label,
784        unaligned_atomic: Label,
785    ) -> Result<(), CompileError>;
786    /// i32 atomic save of a the lower 16bits
787    #[allow(clippy::too_many_arguments)]
788    fn i32_atomic_save_16(
789        &mut self,
790        value: Location<Self::GPR, Self::SIMD>,
791        memarg: &MemArg,
792        addr: Location<Self::GPR, Self::SIMD>,
793        need_check: bool,
794        imported_memories: bool,
795        offset: i32,
796        heap_access_oob: Label,
797        unaligned_atomic: Label,
798    ) -> Result<(), CompileError>;
    /// i32 atomic Add with i32
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_add(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Add with unsigned 8 bits
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_add_8u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Add with unsigned 16 bits
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_add_16u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Sub with i32
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_sub(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Sub with unsigned 8 bits
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_sub_8u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Sub with unsigned 16 bits
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_sub_16u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic And with i32
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_and(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic And with unsigned 8 bits
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_and_8u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic And with unsigned 16 bits
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_and_16u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Or with i32
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_or(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Or with unsigned 8 bits
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_or_8u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Or with unsigned 16 bits
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_or_16u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Xor with i32
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_xor(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Xor with unsigned 8 bits
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_xor_8u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Xor with unsigned 16 bits
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_xor_16u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Exchange with i32
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_xchg(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Exchange with u8
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_xchg_8u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Exchange with u16
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_xchg_16u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Compare and Exchange with i32
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_cmpxchg(
        &mut self,
        new: Location<Self::GPR, Self::SIMD>,
        cmp: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Compare and Exchange with u8
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_cmpxchg_8u(
        &mut self,
        new: Location<Self::GPR, Self::SIMD>,
        cmp: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Compare and Exchange with u16
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_cmpxchg_16u(
        &mut self,
        new: Location<Self::GPR, Self::SIMD>,
        cmp: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
1096
    /// Emit a move of a function address into a GPR ready for call, using the appropriate relocation
    fn emit_call_with_reloc(
        &mut self,
        calling_convention: CallingConvention,
        reloc_target: RelocationTarget,
    ) -> Result<Vec<Relocation>, CompileError>;
    /// Add with location directly from the stack
    fn emit_binop_add64(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Sub with location directly from the stack
    fn emit_binop_sub64(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Multiply with location directly from the stack
    fn emit_binop_mul64(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Unsigned Division with location directly from the stack. Returns the offset of the DIV opcode, to mark as trappable.
    fn emit_binop_udiv64(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
        integer_division_by_zero: Label,
    ) -> Result<usize, CompileError>;
    /// Signed Division with location directly from the stack. Returns the offset of the DIV opcode, to mark as trappable.
    fn emit_binop_sdiv64(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
        integer_division_by_zero: Label,
        integer_overflow: Label,
    ) -> Result<usize, CompileError>;
    /// Unsigned Remainder (of a division) with location directly from the stack. Returns the offset of the DIV opcode, to mark as trappable.
    fn emit_binop_urem64(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
        integer_division_by_zero: Label,
    ) -> Result<usize, CompileError>;
    /// Signed Remainder (of a division) with location directly from the stack. Returns the offset of the DIV opcode, to mark as trappable.
    fn emit_binop_srem64(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
        integer_division_by_zero: Label,
    ) -> Result<usize, CompileError>;
    /// And with location directly from the stack
    fn emit_binop_and64(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Or with location directly from the stack
    fn emit_binop_or64(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Xor with location directly from the stack
    fn emit_binop_xor64(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Signed Greater or Equal Compare of 2 i64, result in a GPR
    fn i64_cmp_ge_s(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Signed Greater Than Compare of 2 i64, result in a GPR
    fn i64_cmp_gt_s(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Signed Less or Equal Compare of 2 i64, result in a GPR
    fn i64_cmp_le_s(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Signed Less Than Compare of 2 i64, result in a GPR
    fn i64_cmp_lt_s(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Unsigned Greater or Equal Compare of 2 i64, result in a GPR
    fn i64_cmp_ge_u(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Unsigned Greater Than Compare of 2 i64, result in a GPR
    fn i64_cmp_gt_u(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Unsigned Less or Equal Compare of 2 i64, result in a GPR
    fn i64_cmp_le_u(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Unsigned Less Than Compare of 2 i64, result in a GPR
    fn i64_cmp_lt_u(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Not Equal Compare of 2 i64, result in a GPR
    fn i64_cmp_ne(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Equal Compare of 2 i64, result in a GPR
    fn i64_cmp_eq(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Count Leading 0 bits of an i64
    fn i64_clz(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Count Trailing 0 bits of an i64
    fn i64_ctz(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Count the number of 1 bits of an i64
    fn i64_popcnt(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// i64 Logical Shift Left
    fn i64_shl(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// i64 Logical Shift Right
    fn i64_shr(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// i64 Arithmetic Shift Right
    fn i64_sar(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// i64 Rotate Left
    fn i64_rol(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// i64 Rotate Right
    fn i64_ror(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// i64 load
    #[allow(clippy::too_many_arguments)]
    fn i64_load(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 load of an unsigned 8 bits
    #[allow(clippy::too_many_arguments)]
    fn i64_load_8u(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 load of a signed 8 bits
    #[allow(clippy::too_many_arguments)]
    fn i64_load_8s(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 load of an unsigned 32 bits
    #[allow(clippy::too_many_arguments)]
    fn i64_load_32u(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 load of a signed 32 bits
    #[allow(clippy::too_many_arguments)]
    fn i64_load_32s(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 load of an unsigned 16 bits
    #[allow(clippy::too_many_arguments)]
    fn i64_load_16u(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 load of a signed 16 bits
    #[allow(clippy::too_many_arguments)]
    fn i64_load_16s(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic load
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_load(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic load from unsigned 8 bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_load_8u(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic load from unsigned 16 bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_load_16u(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic load from unsigned 32 bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_load_32u(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 save
    #[allow(clippy::too_many_arguments)]
    fn i64_save(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 save of the lower 8 bits
    #[allow(clippy::too_many_arguments)]
    fn i64_save_8(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 save of the lower 16 bits
    #[allow(clippy::too_many_arguments)]
    fn i64_save_16(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 save of the lower 32 bits
    #[allow(clippy::too_many_arguments)]
    fn i64_save_32(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic save
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_save(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic save of the lower 8 bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_save_8(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic save of the lower 16 bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_save_16(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic save of the lower 32 bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_save_32(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic Add with i64
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_add(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic Add with unsigned 8 bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_add_8u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic Add with unsigned 16 bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_add_16u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic Add with unsigned 32 bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_add_32u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic Sub with i64
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_sub(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic Sub with unsigned 8 bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_sub_8u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic Sub with unsigned 16 bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_sub_16u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic Sub with unsigned 32 bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_sub_32u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
1660    /// i64 atomic And with i64
1661    #[allow(clippy::too_many_arguments)]
1662    fn i64_atomic_and(
1663        &mut self,
1664        loc: Location<Self::GPR, Self::SIMD>,
1665        target: Location<Self::GPR, Self::SIMD>,
1666        memarg: &MemArg,
1667        ret: Location<Self::GPR, Self::SIMD>,
1668        need_check: bool,
1669        imported_memories: bool,
1670        offset: i32,
1671        heap_access_oob: Label,
1672        unaligned_atomic: Label,
1673    ) -> Result<(), CompileError>;
1674    /// i64 atomic And with unsigned 8bits
1675    #[allow(clippy::too_many_arguments)]
1676    fn i64_atomic_and_8u(
1677        &mut self,
1678        loc: Location<Self::GPR, Self::SIMD>,
1679        target: Location<Self::GPR, Self::SIMD>,
1680        memarg: &MemArg,
1681        ret: Location<Self::GPR, Self::SIMD>,
1682        need_check: bool,
1683        imported_memories: bool,
1684        offset: i32,
1685        heap_access_oob: Label,
1686        unaligned_atomic: Label,
1687    ) -> Result<(), CompileError>;
1688    /// i64 atomic And with unsigned 16bits
1689    #[allow(clippy::too_many_arguments)]
1690    fn i64_atomic_and_16u(
1691        &mut self,
1692        loc: Location<Self::GPR, Self::SIMD>,
1693        target: Location<Self::GPR, Self::SIMD>,
1694        memarg: &MemArg,
1695        ret: Location<Self::GPR, Self::SIMD>,
1696        need_check: bool,
1697        imported_memories: bool,
1698        offset: i32,
1699        heap_access_oob: Label,
1700        unaligned_atomic: Label,
1701    ) -> Result<(), CompileError>;
1702    /// i64 atomic And with unsigned 32bits
1703    #[allow(clippy::too_many_arguments)]
1704    fn i64_atomic_and_32u(
1705        &mut self,
1706        loc: Location<Self::GPR, Self::SIMD>,
1707        target: Location<Self::GPR, Self::SIMD>,
1708        memarg: &MemArg,
1709        ret: Location<Self::GPR, Self::SIMD>,
1710        need_check: bool,
1711        imported_memories: bool,
1712        offset: i32,
1713        heap_access_oob: Label,
1714        unaligned_atomic: Label,
1715    ) -> Result<(), CompileError>;
1716    /// i64 atomic Or with i64
1717    #[allow(clippy::too_many_arguments)]
1718    fn i64_atomic_or(
1719        &mut self,
1720        loc: Location<Self::GPR, Self::SIMD>,
1721        target: Location<Self::GPR, Self::SIMD>,
1722        memarg: &MemArg,
1723        ret: Location<Self::GPR, Self::SIMD>,
1724        need_check: bool,
1725        imported_memories: bool,
1726        offset: i32,
1727        heap_access_oob: Label,
1728        unaligned_atomic: Label,
1729    ) -> Result<(), CompileError>;
1730    /// i64 atomic Or with unsigned 8bits
1731    #[allow(clippy::too_many_arguments)]
1732    fn i64_atomic_or_8u(
1733        &mut self,
1734        loc: Location<Self::GPR, Self::SIMD>,
1735        target: Location<Self::GPR, Self::SIMD>,
1736        memarg: &MemArg,
1737        ret: Location<Self::GPR, Self::SIMD>,
1738        need_check: bool,
1739        imported_memories: bool,
1740        offset: i32,
1741        heap_access_oob: Label,
1742        unaligned_atomic: Label,
1743    ) -> Result<(), CompileError>;
1744    /// i64 atomic Or with unsigned 16bits
1745    #[allow(clippy::too_many_arguments)]
1746    fn i64_atomic_or_16u(
1747        &mut self,
1748        loc: Location<Self::GPR, Self::SIMD>,
1749        target: Location<Self::GPR, Self::SIMD>,
1750        memarg: &MemArg,
1751        ret: Location<Self::GPR, Self::SIMD>,
1752        need_check: bool,
1753        imported_memories: bool,
1754        offset: i32,
1755        heap_access_oob: Label,
1756        unaligned_atomic: Label,
1757    ) -> Result<(), CompileError>;
1758    /// i64 atomic Or with unsigned 32bits
1759    #[allow(clippy::too_many_arguments)]
1760    fn i64_atomic_or_32u(
1761        &mut self,
1762        loc: Location<Self::GPR, Self::SIMD>,
1763        target: Location<Self::GPR, Self::SIMD>,
1764        memarg: &MemArg,
1765        ret: Location<Self::GPR, Self::SIMD>,
1766        need_check: bool,
1767        imported_memories: bool,
1768        offset: i32,
1769        heap_access_oob: Label,
1770        unaligned_atomic: Label,
1771    ) -> Result<(), CompileError>;
1772    /// i64 atomic Xor with i64
1773    #[allow(clippy::too_many_arguments)]
1774    fn i64_atomic_xor(
1775        &mut self,
1776        loc: Location<Self::GPR, Self::SIMD>,
1777        target: Location<Self::GPR, Self::SIMD>,
1778        memarg: &MemArg,
1779        ret: Location<Self::GPR, Self::SIMD>,
1780        need_check: bool,
1781        imported_memories: bool,
1782        offset: i32,
1783        heap_access_oob: Label,
1784        unaligned_atomic: Label,
1785    ) -> Result<(), CompileError>;
1786    /// i64 atomic Xor with unsigned 8bits
1787    #[allow(clippy::too_many_arguments)]
1788    fn i64_atomic_xor_8u(
1789        &mut self,
1790        loc: Location<Self::GPR, Self::SIMD>,
1791        target: Location<Self::GPR, Self::SIMD>,
1792        memarg: &MemArg,
1793        ret: Location<Self::GPR, Self::SIMD>,
1794        need_check: bool,
1795        imported_memories: bool,
1796        offset: i32,
1797        heap_access_oob: Label,
1798        unaligned_atomic: Label,
1799    ) -> Result<(), CompileError>;
1800    /// i64 atomic Xor with unsigned 16bits
1801    #[allow(clippy::too_many_arguments)]
1802    fn i64_atomic_xor_16u(
1803        &mut self,
1804        loc: Location<Self::GPR, Self::SIMD>,
1805        target: Location<Self::GPR, Self::SIMD>,
1806        memarg: &MemArg,
1807        ret: Location<Self::GPR, Self::SIMD>,
1808        need_check: bool,
1809        imported_memories: bool,
1810        offset: i32,
1811        heap_access_oob: Label,
1812        unaligned_atomic: Label,
1813    ) -> Result<(), CompileError>;
1814    /// i64 atomic Xor with unsigned 32bits
1815    #[allow(clippy::too_many_arguments)]
1816    fn i64_atomic_xor_32u(
1817        &mut self,
1818        loc: Location<Self::GPR, Self::SIMD>,
1819        target: Location<Self::GPR, Self::SIMD>,
1820        memarg: &MemArg,
1821        ret: Location<Self::GPR, Self::SIMD>,
1822        need_check: bool,
1823        imported_memories: bool,
1824        offset: i32,
1825        heap_access_oob: Label,
1826        unaligned_atomic: Label,
1827    ) -> Result<(), CompileError>;
1828    /// i64 atomic Exchange with i64
1829    #[allow(clippy::too_many_arguments)]
1830    fn i64_atomic_xchg(
1831        &mut self,
1832        loc: Location<Self::GPR, Self::SIMD>,
1833        target: Location<Self::GPR, Self::SIMD>,
1834        memarg: &MemArg,
1835        ret: Location<Self::GPR, Self::SIMD>,
1836        need_check: bool,
1837        imported_memories: bool,
1838        offset: i32,
1839        heap_access_oob: Label,
1840        unaligned_atomic: Label,
1841    ) -> Result<(), CompileError>;
1842    /// i64 atomic Exchange with u8
1843    #[allow(clippy::too_many_arguments)]
1844    fn i64_atomic_xchg_8u(
1845        &mut self,
1846        loc: Location<Self::GPR, Self::SIMD>,
1847        target: Location<Self::GPR, Self::SIMD>,
1848        memarg: &MemArg,
1849        ret: Location<Self::GPR, Self::SIMD>,
1850        need_check: bool,
1851        imported_memories: bool,
1852        offset: i32,
1853        heap_access_oob: Label,
1854        unaligned_atomic: Label,
1855    ) -> Result<(), CompileError>;
1856    /// i64 atomic Exchange with u16
1857    #[allow(clippy::too_many_arguments)]
1858    fn i64_atomic_xchg_16u(
1859        &mut self,
1860        loc: Location<Self::GPR, Self::SIMD>,
1861        target: Location<Self::GPR, Self::SIMD>,
1862        memarg: &MemArg,
1863        ret: Location<Self::GPR, Self::SIMD>,
1864        need_check: bool,
1865        imported_memories: bool,
1866        offset: i32,
1867        heap_access_oob: Label,
1868        unaligned_atomic: Label,
1869    ) -> Result<(), CompileError>;
1870    /// i64 atomic Exchange with u32
1871    #[allow(clippy::too_many_arguments)]
1872    fn i64_atomic_xchg_32u(
1873        &mut self,
1874        loc: Location<Self::GPR, Self::SIMD>,
1875        target: Location<Self::GPR, Self::SIMD>,
1876        memarg: &MemArg,
1877        ret: Location<Self::GPR, Self::SIMD>,
1878        need_check: bool,
1879        imported_memories: bool,
1880        offset: i32,
1881        heap_access_oob: Label,
1882        unaligned_atomic: Label,
1883    ) -> Result<(), CompileError>;
1884    /// i64 atomic Compare and Exchange with i32
1885    #[allow(clippy::too_many_arguments)]
1886    fn i64_atomic_cmpxchg(
1887        &mut self,
1888        new: Location<Self::GPR, Self::SIMD>,
1889        cmp: Location<Self::GPR, Self::SIMD>,
1890        target: Location<Self::GPR, Self::SIMD>,
1891        memarg: &MemArg,
1892        ret: Location<Self::GPR, Self::SIMD>,
1893        need_check: bool,
1894        imported_memories: bool,
1895        offset: i32,
1896        heap_access_oob: Label,
1897        unaligned_atomic: Label,
1898    ) -> Result<(), CompileError>;
1899    /// i64 atomic Compare and Exchange with u8
1900    #[allow(clippy::too_many_arguments)]
1901    fn i64_atomic_cmpxchg_8u(
1902        &mut self,
1903        new: Location<Self::GPR, Self::SIMD>,
1904        cmp: Location<Self::GPR, Self::SIMD>,
1905        target: Location<Self::GPR, Self::SIMD>,
1906        memarg: &MemArg,
1907        ret: Location<Self::GPR, Self::SIMD>,
1908        need_check: bool,
1909        imported_memories: bool,
1910        offset: i32,
1911        heap_access_oob: Label,
1912        unaligned_atomic: Label,
1913    ) -> Result<(), CompileError>;
1914    /// i64 atomic Compare and Exchange with u16
1915    #[allow(clippy::too_many_arguments)]
1916    fn i64_atomic_cmpxchg_16u(
1917        &mut self,
1918        new: Location<Self::GPR, Self::SIMD>,
1919        cmp: Location<Self::GPR, Self::SIMD>,
1920        target: Location<Self::GPR, Self::SIMD>,
1921        memarg: &MemArg,
1922        ret: Location<Self::GPR, Self::SIMD>,
1923        need_check: bool,
1924        imported_memories: bool,
1925        offset: i32,
1926        heap_access_oob: Label,
1927        unaligned_atomic: Label,
1928    ) -> Result<(), CompileError>;
1929    /// i64 atomic Compare and Exchange with u32
1930    #[allow(clippy::too_many_arguments)]
1931    fn i64_atomic_cmpxchg_32u(
1932        &mut self,
1933        new: Location<Self::GPR, Self::SIMD>,
1934        cmp: Location<Self::GPR, Self::SIMD>,
1935        target: Location<Self::GPR, Self::SIMD>,
1936        memarg: &MemArg,
1937        ret: Location<Self::GPR, Self::SIMD>,
1938        need_check: bool,
1939        imported_memories: bool,
1940        offset: i32,
1941        heap_access_oob: Label,
1942        unaligned_atomic: Label,
1943    ) -> Result<(), CompileError>;
1944
    /// load an F32 from memory at `addr` (+ `memarg` offset) into `ret`.
    #[allow(clippy::too_many_arguments)]
    fn f32_load(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// f32 save (store `value` to memory at `addr`).
    /// `canonicalize` presumably requests NaN canonicalization of the stored
    /// value — confirm in the per-arch implementations.
    #[allow(clippy::too_many_arguments)]
    fn f32_save(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        canonicalize: bool,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// load an F64 from memory at `addr` (+ `memarg` offset) into `ret`.
    #[allow(clippy::too_many_arguments)]
    fn f64_load(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// f64 save (store `value` to memory at `addr`).
    /// `canonicalize` presumably requests NaN canonicalization of the stored
    /// value — confirm in the per-arch implementations.
    #[allow(clippy::too_many_arguments)]
    fn f64_save(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        canonicalize: bool,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// Convert a F64 from I64, signed or unsigned
    fn convert_f64_i64(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        signed: bool,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Convert a F64 from I32, signed or unsigned
    fn convert_f64_i32(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        signed: bool,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Convert a F32 from I64, signed or unsigned
    fn convert_f32_i64(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        signed: bool,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Convert a F32 from I32, signed or unsigned
    fn convert_f32_i32(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        signed: bool,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Convert a F64 to I64, signed or unsigned, with or without saturation
    fn convert_i64_f64(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
        signed: bool,
        sat: bool,
    ) -> Result<(), CompileError>;
    /// Convert a F64 to I32, signed or unsigned, with or without saturation
    fn convert_i32_f64(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
        signed: bool,
        sat: bool,
    ) -> Result<(), CompileError>;
    /// Convert a F32 to I64, signed or unsigned, with or without saturation
    fn convert_i64_f32(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
        signed: bool,
        sat: bool,
    ) -> Result<(), CompileError>;
    /// Convert a F32 to I32, signed or unsigned, with or without saturation
    fn convert_i32_f32(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
        signed: bool,
        sat: bool,
    ) -> Result<(), CompileError>;
    /// Convert a F32 to F64 (widening; result in `ret`)
    fn convert_f64_f32(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Convert a F64 to F32 (narrowing; result in `ret`)
    fn convert_f32_f64(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Negate an F64
    fn f64_neg(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Get the Absolute Value of an F64
    fn f64_abs(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Copy sign from tmp1 Self::GPR to tmp2 Self::GPR (64-bit copysign on GPR bit patterns)
    fn emit_i64_copysign(&mut self, tmp1: Self::GPR, tmp2: Self::GPR) -> Result<(), CompileError>;
    /// Get the Square Root of an F64
    fn f64_sqrt(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Trunc of an F64 (round towards zero)
    fn f64_trunc(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Ceil of an F64
    fn f64_ceil(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Floor of an F64
    fn f64_floor(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Round at nearest int of an F64
    fn f64_nearest(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Greater or Equal Compare 2 F64, result in a GPR
    fn f64_cmp_ge(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Greater Than Compare 2 F64, result in a GPR
    fn f64_cmp_gt(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Less or Equal Compare 2 F64, result in a GPR
    fn f64_cmp_le(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Less Than Compare 2 F64, result in a GPR
    fn f64_cmp_lt(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Not Equal Compare 2 F64, result in a GPR
    fn f64_cmp_ne(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Equal Compare 2 F64, result in a GPR
    fn f64_cmp_eq(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// get Min for 2 F64 values
    fn f64_min(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// get Max for 2 F64 values
    fn f64_max(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Add 2 F64 values
    fn f64_add(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Sub 2 F64 values
    fn f64_sub(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Multiply 2 F64 values
    fn f64_mul(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Divide 2 F64 values
    fn f64_div(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Negate an F32
    fn f32_neg(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Get the Absolute Value of an F32
    fn f32_abs(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Copy sign from tmp1 Self::GPR to tmp2 Self::GPR (32-bit copysign on GPR bit patterns)
    fn emit_i32_copysign(&mut self, tmp1: Self::GPR, tmp2: Self::GPR) -> Result<(), CompileError>;
    /// Get the Square Root of an F32
    fn f32_sqrt(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Trunc of an F32 (round towards zero)
    fn f32_trunc(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Ceil of an F32
    fn f32_ceil(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Floor of an F32
    fn f32_floor(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Round at nearest int of an F32
    fn f32_nearest(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Greater or Equal Compare 2 F32, result in a GPR
    fn f32_cmp_ge(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Greater Than Compare 2 F32, result in a GPR
    fn f32_cmp_gt(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Less or Equal Compare 2 F32, result in a GPR
    fn f32_cmp_le(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Less Than Compare 2 F32, result in a GPR
    fn f32_cmp_lt(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Not Equal Compare 2 F32, result in a GPR
    fn f32_cmp_ne(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Equal Compare 2 F32, result in a GPR
    fn f32_cmp_eq(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// get Min for 2 F32 values
    fn f32_min(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// get Max for 2 F32 values
    fn f32_max(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Add 2 F32 values
    fn f32_add(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Sub 2 F32 values
    fn f32_sub(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Multiply 2 F32 values
    fn f32_mul(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Divide 2 F32 values
    fn f32_div(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
2327
    /// Standard function Trampoline generation
    fn gen_std_trampoline(
        &self,
        sig: &FunctionType,
        calling_convention: CallingConvention,
    ) -> Result<FunctionBody, CompileError>;
    /// Generates dynamic import function call trampoline for a function type.
    fn gen_std_dynamic_import_trampoline(
        &self,
        vmoffsets: &VMOffsets,
        sig: &FunctionType,
        calling_convention: CallingConvention,
    ) -> Result<FunctionBody, CompileError>;
    /// Singlepass calls import functions through a trampoline.
    fn gen_import_call_trampoline(
        &self,
        vmoffsets: &VMOffsets,
        index: FunctionIndex,
        sig: &FunctionType,
        calling_convention: CallingConvention,
    ) -> Result<CustomSection, CompileError>;
    /// generate `eh_frame` unwind instructions (or `None` if not possible / supported)
    fn gen_dwarf_unwind_info(&mut self, code_len: usize) -> Option<UnwindInstructions>;
    /// generate Windows unwind instructions (or `None` if not possible / supported)
    fn gen_windows_unwind_info(&mut self, code_len: usize) -> Option<Vec<u8>>;
2353}
2354
2355/// Standard entry trampoline generation
2356pub fn gen_std_trampoline(
2357    sig: &FunctionType,
2358    target: &Target,
2359    calling_convention: CallingConvention,
2360) -> Result<FunctionBody, CompileError> {
2361    match target.triple().architecture {
2362        Architecture::X86_64 => {
2363            let machine = MachineX86_64::new(Some(target.clone()))?;
2364            machine.gen_std_trampoline(sig, calling_convention)
2365        }
2366        Architecture::Aarch64(_) => {
2367            let machine = MachineARM64::new(Some(target.clone()));
2368            machine.gen_std_trampoline(sig, calling_convention)
2369        }
2370        Architecture::Riscv64(_) => {
2371            let machine = MachineRiscv::new(Some(target.clone()))?;
2372            machine.gen_std_trampoline(sig, calling_convention)
2373        }
2374        _ => Err(CompileError::UnsupportedTarget(
2375            "singlepass unimplemented arch for gen_std_trampoline".to_owned(),
2376        )),
2377    }
2378}
2379
2380/// Generates dynamic import function call trampoline for a function type.
2381pub fn gen_std_dynamic_import_trampoline(
2382    vmoffsets: &VMOffsets,
2383    sig: &FunctionType,
2384    target: &Target,
2385    calling_convention: CallingConvention,
2386) -> Result<FunctionBody, CompileError> {
2387    match target.triple().architecture {
2388        Architecture::X86_64 => {
2389            let machine = MachineX86_64::new(Some(target.clone()))?;
2390            machine.gen_std_dynamic_import_trampoline(vmoffsets, sig, calling_convention)
2391        }
2392        Architecture::Aarch64(_) => {
2393            let machine = MachineARM64::new(Some(target.clone()));
2394            machine.gen_std_dynamic_import_trampoline(vmoffsets, sig, calling_convention)
2395        }
2396        Architecture::Riscv64(_) => {
2397            let machine = MachineRiscv::new(Some(target.clone()))?;
2398            machine.gen_std_dynamic_import_trampoline(vmoffsets, sig, calling_convention)
2399        }
2400        _ => Err(CompileError::UnsupportedTarget(
2401            "singlepass unimplemented arch for gen_std_dynamic_import_trampoline".to_owned(),
2402        )),
2403    }
2404}
2405/// Singlepass calls import functions through a trampoline.
2406pub fn gen_import_call_trampoline(
2407    vmoffsets: &VMOffsets,
2408    index: FunctionIndex,
2409    sig: &FunctionType,
2410    target: &Target,
2411    calling_convention: CallingConvention,
2412) -> Result<CustomSection, CompileError> {
2413    match target.triple().architecture {
2414        Architecture::X86_64 => {
2415            let machine = MachineX86_64::new(Some(target.clone()))?;
2416            machine.gen_import_call_trampoline(vmoffsets, index, sig, calling_convention)
2417        }
2418        Architecture::Aarch64(_) => {
2419            let machine = MachineARM64::new(Some(target.clone()));
2420            machine.gen_import_call_trampoline(vmoffsets, index, sig, calling_convention)
2421        }
2422        Architecture::Riscv64(_) => {
2423            let machine = MachineRiscv::new(Some(target.clone()))?;
2424            machine.gen_import_call_trampoline(vmoffsets, index, sig, calling_convention)
2425        }
2426        _ => Err(CompileError::UnsupportedTarget(
2427            "singlepass unimplemented arch for gen_import_call_trampoline".to_owned(),
2428        )),
2429    }
2430}
2431
// Constants for the bounds of truncation operations. These are the least or
// greatest exact floats in either f32 or f64 representation less-than (for
// least) or greater-than (for greatest) the i32 or i64 or u32 or u64
// min (for least) or max (for greatest), when rounding towards zero.
// A value outside the (GE..LE) bound for its type is out of range for the
// corresponding wasm trunc operation.

/// Greatest Exact Float (32 bits) less-than i32::MIN when rounding towards zero.
pub const GEF32_LT_I32_MIN: f32 = -2147483904.0;
/// Least Exact Float (32 bits) greater-than i32::MAX when rounding towards zero.
pub const LEF32_GT_I32_MAX: f32 = 2147483648.0;
/// Greatest Exact Float (32 bits) less-than i64::MIN when rounding towards zero.
pub const GEF32_LT_I64_MIN: f32 = -9223373136366403584.0;
/// Least Exact Float (32 bits) greater-than i64::MAX when rounding towards zero.
pub const LEF32_GT_I64_MAX: f32 = 9223372036854775808.0;
/// Greatest Exact Float (32 bits) less-than u32::MIN when rounding towards zero.
pub const GEF32_LT_U32_MIN: f32 = -1.0;
/// Least Exact Float (32 bits) greater-than u32::MAX when rounding towards zero.
pub const LEF32_GT_U32_MAX: f32 = 4294967296.0;
/// Greatest Exact Float (32 bits) less-than u64::MIN when rounding towards zero.
pub const GEF32_LT_U64_MIN: f32 = -1.0;
/// Least Exact Float (32 bits) greater-than u64::MAX when rounding towards zero.
pub const LEF32_GT_U64_MAX: f32 = 18446744073709551616.0;

/// Greatest Exact Float (64 bits) less-than i32::MIN when rounding towards zero.
pub const GEF64_LT_I32_MIN: f64 = -2147483649.0;
/// Least Exact Float (64 bits) greater-than i32::MAX when rounding towards zero.
pub const LEF64_GT_I32_MAX: f64 = 2147483648.0;
/// Greatest Exact Float (64 bits) less-than i64::MIN when rounding towards zero.
pub const GEF64_LT_I64_MIN: f64 = -9223372036854777856.0;
/// Least Exact Float (64 bits) greater-than i64::MAX when rounding towards zero.
pub const LEF64_GT_I64_MAX: f64 = 9223372036854775808.0;
/// Greatest Exact Float (64 bits) less-than u32::MIN when rounding towards zero.
pub const GEF64_LT_U32_MIN: f64 = -1.0;
/// Least Exact Float (64 bits) greater-than u32::MAX when rounding towards zero.
pub const LEF64_GT_U32_MAX: f64 = 4294967296.0;
/// Greatest Exact Float (64 bits) less-than u64::MIN when rounding towards zero.
pub const GEF64_LT_U64_MIN: f64 = -1.0;
/// Least Exact Float (64 bits) greater-than u64::MAX when rounding towards zero.
pub const LEF64_GT_U64_MAX: f64 = 18446744073709551616.0;