wasmer_compiler_singlepass/
machine.rs

1use crate::{
2    common_decl::*,
3    location::{Location, Reg},
4    machine_arm64::MachineARM64,
5    machine_x64::MachineX86_64,
6    unwind::UnwindInstructions,
7};
8use dynasmrt::{AssemblyOffset, DynamicLabel};
9use std::{
10    collections::{BTreeMap, HashMap},
11    fmt::Debug,
12};
13use wasmer_compiler::{
14    types::{
15        address_map::InstructionAddressMap,
16        function::FunctionBody,
17        relocation::{Relocation, RelocationTarget},
18        section::CustomSection,
19    },
20    wasmparser::MemArg,
21};
22use wasmer_types::{
23    CompileError, FunctionIndex, FunctionType, TrapCode, TrapInformation, VMOffsets,
24    target::{Architecture, CallingConvention, Target},
25};
26pub type Label = DynamicLabel;
27pub type Offset = AssemblyOffset;
28
/// A constant (immediate) value of a specific machine width.
///
/// Derives `Debug` for consistency with the other public types in this
/// module (`TrapTable`, `AssemblyComment`), so immediates can be logged
/// in diagnostics. `PartialEq` only (no `Eq`) because of the float variants.
#[allow(dead_code)]
#[derive(Debug, Clone, PartialEq)]
pub enum Value {
    /// 8-bit signed integer immediate.
    I8(i8),
    /// 32-bit signed integer immediate.
    I32(i32),
    /// 64-bit signed integer immediate.
    I64(i64),
    /// 32-bit float immediate.
    F32(f32),
    /// 64-bit float immediate.
    F64(f64),
}
38
/// Returns early from the enclosing function with a
/// `CompileError::Codegen` built from `format!`-style arguments.
///
/// The enclosing function must return `Result<_, CompileError>`, and
/// `CompileError` must be in scope at the call site (the expansion
/// references it unqualified).
#[macro_export]
macro_rules! codegen_error {
    ($($arg:tt)*) => {return Err(CompileError::Codegen(format!($($arg)*)))}
}
43
44#[allow(unused)]
45pub trait MaybeImmediate {
46    fn imm_value(&self) -> Option<Value>;
47    fn is_imm(&self) -> bool {
48        self.imm_value().is_some()
49    }
50}
51
/// A trap table for a `RunnableModuleInfo`.
///
/// Stored as a `BTreeMap` so entries stay ordered by machine-code offset.
#[derive(Clone, Debug, Default)]
pub struct TrapTable {
    /// Mappings from offsets in generated machine code to the corresponding trap code.
    pub offset_to_code: BTreeMap<usize, TrapCode>,
}
58
/// Native page size in bytes. All supported architectures appear to use
/// 4 KiB pages, so this is not per-arch for now.
pub const NATIVE_PAGE_SIZE: usize = 4096;
61
/// Conditions for unsigned comparisons, used when emitting conditional
/// jumps (see `Machine::jmp_on_condition`).
///
/// Derives `Debug`/`Clone`/`Copy`/`PartialEq`/`Eq`: the variants are
/// plain discriminants, and `Copy` lets call sites pass conditions by
/// value without moves. All additions are backward-compatible.
#[allow(dead_code)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum UnsignedCondition {
    /// Operands are equal.
    Equal,
    /// Operands are not equal.
    NotEqual,
    /// First operand strictly greater (unsigned).
    Above,
    /// First operand greater or equal (unsigned).
    AboveEqual,
    /// First operand strictly smaller (unsigned).
    Below,
    /// First operand smaller or equal (unsigned).
    BelowEqual,
}
71
/// Categories of comments attached to offsets in emitted machine code
/// (collected in `FinalizedAssembly::assembly_comments`) to annotate
/// the generated assembly for human inspection.
#[derive(Debug, Clone)]
pub enum AssemblyComment {
    /// Native function prologue.
    FunctionPrologue,
    /// Initialization of the function's locals.
    InitializeLocals,
    /// Table of trap handlers.
    TrapHandlersTable,
    /// Red-zone region.
    RedZone,
    /// Start of the translated function body.
    FunctionBody,
}
80
81impl std::fmt::Display for AssemblyComment {
82    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
83        match self {
84            AssemblyComment::FunctionPrologue => write!(f, "function prologue"),
85            AssemblyComment::InitializeLocals => write!(f, "initialize locals"),
86            AssemblyComment::TrapHandlersTable => write!(f, "trap handlers table"),
87            AssemblyComment::RedZone => write!(f, "red zone"),
88            AssemblyComment::FunctionBody => write!(f, "body"),
89        }
90    }
91}
92
/// Output of `Machine::assembler_finalize`: the emitted machine code
/// together with the comments describing its regions.
pub(crate) struct FinalizedAssembly {
    /// The raw bytes of the generated machine code.
    pub(crate) body: Vec<u8>,
    /// Comments keyed by assembly offset (presumably into `body` — see
    /// `AssemblyComment`); TODO confirm against the finalize call sites.
    pub(crate) assembly_comments: HashMap<usize, AssemblyComment>,
}
97
98#[allow(unused)]
99pub trait Machine {
100    type GPR: Copy + Eq + Debug + Reg;
101    type SIMD: Copy + Eq + Debug + Reg;
102    /// Get current assembler offset
103    fn assembler_get_offset(&self) -> Offset;
104    /// Get the GPR that hold vmctx
105    fn get_vmctx_reg(&self) -> Self::GPR;
106    /// Picks an unused general purpose register for local/stack/argument use.
107    ///
108    /// This method does not mark the register as used
109    fn pick_gpr(&self) -> Option<Self::GPR>;
110    /// Picks an unused general purpose register for internal temporary use.
111    ///
112    /// This method does not mark the register as used
113    fn pick_temp_gpr(&self) -> Option<Self::GPR>;
114    /// Get all used GPR
115    fn get_used_gprs(&self) -> Vec<Self::GPR>;
116    /// Get all used SIMD regs
117    fn get_used_simd(&self) -> Vec<Self::SIMD>;
    /// Picks an unused general purpose register and marks it as used.
    fn acquire_temp_gpr(&mut self) -> Option<Self::GPR>;
120    /// Releases a temporary GPR.
121    fn release_gpr(&mut self, gpr: Self::GPR);
122    /// Specify that a given register is in use.
123    fn reserve_unused_temp_gpr(&mut self, gpr: Self::GPR) -> Self::GPR;
124    /// reserve a GPR
125    fn reserve_gpr(&mut self, gpr: Self::GPR);
126    /// Push used gpr to the stack. Return the bytes taken on the stack.
127    fn push_used_gpr(&mut self, gprs: &[Self::GPR]) -> Result<usize, CompileError>;
128    /// Pop used gpr from the stack.
129    fn pop_used_gpr(&mut self, gprs: &[Self::GPR]) -> Result<(), CompileError>;
130    /// Picks an unused SIMD register.
131    ///
132    /// This method does not mark the register as used
133    fn pick_simd(&self) -> Option<Self::SIMD>;
134    /// Picks an unused SIMD register for internal temporary use.
135    ///
136    /// This method does not mark the register as used
137    fn pick_temp_simd(&self) -> Option<Self::SIMD>;
138    /// Acquires a temporary XMM register.
139    fn acquire_temp_simd(&mut self) -> Option<Self::SIMD>;
140    /// reserve a SIMD register
141    fn reserve_simd(&mut self, simd: Self::SIMD);
142    /// Releases a temporary XMM register.
143    fn release_simd(&mut self, simd: Self::SIMD);
144    /// Push used simd regs to the stack. Return bytes taken on the stack
145    fn push_used_simd(&mut self, simds: &[Self::SIMD]) -> Result<usize, CompileError>;
146    /// Pop used simd regs to the stack
147    fn pop_used_simd(&mut self, simds: &[Self::SIMD]) -> Result<(), CompileError>;
    /// Return a rounded stack adjustment value (must be a multiple of 16 bytes on ARM64, for example).
    fn round_stack_adjust(&self, value: usize) -> usize;
150    /// Set the source location of the Wasm to the given offset.
151    fn set_srcloc(&mut self, offset: u32);
152    /// Marks each address in the code range emitted by `f` with the trap code `code`.
153    fn mark_address_range_with_trap_code(&mut self, code: TrapCode, begin: usize, end: usize);
154    /// Marks one address as trappable with trap code `code`.
155    fn mark_address_with_trap_code(&mut self, code: TrapCode);
156    /// Marks the instruction as trappable with trap code `code`. return "begin" offset
157    fn mark_instruction_with_trap_code(&mut self, code: TrapCode) -> usize;
158    /// Pushes the instruction to the address map, calculating the offset from a
159    /// provided beginning address.
160    fn mark_instruction_address_end(&mut self, begin: usize);
161    /// Insert a StackOverflow (at offset 0)
162    fn insert_stackoverflow(&mut self);
163    /// Get all current TrapInformation
164    fn collect_trap_information(&self) -> Vec<TrapInformation>;
    /// Get all instructions' address map.
    fn instructions_address_map(&self) -> Vec<InstructionAddressMap>;
167    /// Memory location for a local on the stack
168    /// Like Location::Memory(GPR::RBP, -(self.stack_offset.0 as i32)) for x86_64
169    fn local_on_stack(&mut self, stack_offset: i32) -> Location<Self::GPR, Self::SIMD>;
170    /// Allocate an extra space on the stack.
171    fn extend_stack(&mut self, delta_stack_offset: u32) -> Result<(), CompileError>;
172    /// Truncate stack space by the `delta_stack_offset`.
173    fn truncate_stack(&mut self, delta_stack_offset: u32) -> Result<(), CompileError>;
    /// Zero a location that is 32 bits.
    /// NOTE(review): a `size` parameter is passed, so implementations may
    /// support other widths — confirm whether "32 bits" still holds.
    fn zero_location(
        &mut self,
        size: Size,
        location: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
180    /// GPR Reg used for local pointer on the stack
181    fn local_pointer(&self) -> Self::GPR;
182    /// push a value on the stack for a native call
183    fn move_location_for_native(
184        &mut self,
185        size: Size,
186        loc: Location<Self::GPR, Self::SIMD>,
187        dest: Location<Self::GPR, Self::SIMD>,
188    ) -> Result<(), CompileError>;
189    /// Determine whether a local should be allocated on the stack.
190    fn is_local_on_stack(&self, idx: usize) -> bool;
191    /// Determine a local's location.
192    fn get_local_location(
193        &self,
194        idx: usize,
195        callee_saved_regs_size: usize,
196    ) -> Location<Self::GPR, Self::SIMD>;
197    /// Move a local to the stack
198    /// Like emit_mov(Size::S64, location, Location::Memory(GPR::RBP, -(self.stack_offset.0 as i32)));
199    fn move_local(
200        &mut self,
201        stack_offset: i32,
202        location: Location<Self::GPR, Self::SIMD>,
203    ) -> Result<(), CompileError>;
204    /// List of register to save, depending on the CallingConvention
205    fn list_to_save(
206        &self,
207        calling_convention: CallingConvention,
208    ) -> Vec<Location<Self::GPR, Self::SIMD>>;
209    /// Get registers for first N function call parameters.
210    fn get_param_registers(&self, calling_convention: CallingConvention) -> &'static [Self::GPR];
211    /// Get param location (to build a call, using SP for stack args)
212    fn get_param_location(
213        &self,
214        idx: usize,
215        sz: Size,
216        stack_offset: &mut usize,
217        calling_convention: CallingConvention,
218    ) -> Location<Self::GPR, Self::SIMD>;
219    /// Get call param location (from a call, using FP for stack args)
220    fn get_call_param_location(
221        &self,
222        result_slots: usize,
223        idx: usize,
224        sz: Size,
225        stack_offset: &mut usize,
226        calling_convention: CallingConvention,
227    ) -> Location<Self::GPR, Self::SIMD>;
228    /// Get param location (idx must point to an argument that is passed in a GPR).
229    fn get_simple_param_location(
230        &self,
231        idx: usize,
232        calling_convention: CallingConvention,
233    ) -> Self::GPR;
234    /// Get return value location (to build a call, using SP for stack return values).
235    fn get_return_value_location(
236        &self,
237        idx: usize,
238        stack_location: &mut usize,
239        calling_convention: CallingConvention,
240    ) -> Location<Self::GPR, Self::SIMD>;
241    /// Get return value location (from a call, using FP for stack return values).
242    fn get_call_return_value_location(
243        &self,
244        idx: usize,
245        calling_convention: CallingConvention,
246    ) -> Location<Self::GPR, Self::SIMD>;
247    /// move a location to another
248    fn move_location(
249        &mut self,
250        size: Size,
251        source: Location<Self::GPR, Self::SIMD>,
252        dest: Location<Self::GPR, Self::SIMD>,
253    ) -> Result<(), CompileError>;
254    /// move a location to another, with zero or sign extension
255    fn move_location_extend(
256        &mut self,
257        size_val: Size,
258        signed: bool,
259        source: Location<Self::GPR, Self::SIMD>,
260        size_op: Size,
261        dest: Location<Self::GPR, Self::SIMD>,
262    ) -> Result<(), CompileError>;
263    /// Init the stack loc counter
264    fn init_stack_loc(
265        &mut self,
266        init_stack_loc_cnt: u64,
267        last_stack_loc: Location<Self::GPR, Self::SIMD>,
268    ) -> Result<(), CompileError>;
269    /// Restore save_area
270    fn restore_saved_area(&mut self, saved_area_offset: i32) -> Result<(), CompileError>;
271    /// Pop a location
272    fn pop_location(
273        &mut self,
274        location: Location<Self::GPR, Self::SIMD>,
275    ) -> Result<(), CompileError>;
276
277    /// Finalize the assembler
278    fn assembler_finalize(
279        self,
280        assembly_comments: HashMap<usize, AssemblyComment>,
281    ) -> Result<FinalizedAssembly, CompileError>;
282
283    /// get_offset of Assembler
284    fn get_offset(&self) -> Offset;
285
286    /// finalize a function
287    fn finalize_function(&mut self) -> Result<(), CompileError>;
288
289    /// emit native function prolog (depending on the calling Convention, like "PUSH RBP / MOV RSP, RBP")
290    fn emit_function_prolog(&mut self) -> Result<(), CompileError>;
291    /// emit native function epilog (depending on the calling Convention, like "MOV RBP, RSP / POP RBP")
292    fn emit_function_epilog(&mut self) -> Result<(), CompileError>;
293    /// Handle copy to SIMD register from ret value (if needed by the arch/calling convention)
294    fn emit_function_return_float(&mut self) -> Result<(), CompileError>;
    /// Canonicalize a NaN (or panic if not supported).
    fn canonicalize_nan(
        &mut self,
        sz: Size,
        input: Location<Self::GPR, Self::SIMD>,
        output: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
302
303    /// emit an Illegal Opcode, associated with a trapcode
304    fn emit_illegal_op(&mut self, trp: TrapCode) -> Result<(), CompileError>;
305    /// create a new label
306    fn get_label(&mut self) -> Label;
307    /// emit a label
308    fn emit_label(&mut self, label: Label) -> Result<(), CompileError>;
309
310    /// get the gpr used for call. like RAX on x86_64
311    fn get_gpr_for_call(&self) -> Self::GPR;
312    /// Emit a call using the value in register
313    fn emit_call_register(&mut self, register: Self::GPR) -> Result<(), CompileError>;
314    /// Emit a call to a label
315    fn emit_call_label(&mut self, label: Label) -> Result<(), CompileError>;
316    /// indirect call with trampoline
317    fn arch_emit_indirect_call_with_trampoline(
318        &mut self,
319        location: Location<Self::GPR, Self::SIMD>,
320    ) -> Result<(), CompileError>;
321    /// emit a call to a location
322    fn emit_call_location(
323        &mut self,
324        location: Location<Self::GPR, Self::SIMD>,
325    ) -> Result<(), CompileError>;
326
327    /// Emit a debug breakpoint
328    fn emit_debug_breakpoint(&mut self) -> Result<(), CompileError>;
329
330    /// Add src+dst -> dst (with or without flags)
331    fn location_add(
332        &mut self,
333        size: Size,
334        source: Location<Self::GPR, Self::SIMD>,
335        dest: Location<Self::GPR, Self::SIMD>,
336        flags: bool,
337    ) -> Result<(), CompileError>;
338
339    /// Cmp src - dst and set flags
340    fn location_cmp(
341        &mut self,
342        size: Size,
343        source: Location<Self::GPR, Self::SIMD>,
344        dest: Location<Self::GPR, Self::SIMD>,
345    ) -> Result<(), CompileError>;
346
    /// jmp without condition
    fn jmp_unconditional(&mut self, label: Label) -> Result<(), CompileError>;
349
350    /// jmp to label if the provided condition is true (when comparing loc_a and loc_b)
351    fn jmp_on_condition(
352        &mut self,
353        cond: UnsignedCondition,
354        size: Size,
355        loc_a: Location<Self::GPR, Self::SIMD>,
356        loc_b: Location<Self::GPR, Self::SIMD>,
357        label: Label,
358    ) -> Result<(), CompileError>;
359
    /// jmp using a jump table at `label` with `cond` as the index
    fn emit_jmp_to_jumptable(
        &mut self,
        label: Label,
        cond: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
366
367    /// Align for Loop (may do nothing, depending on the arch)
368    fn align_for_loop(&mut self) -> Result<(), CompileError>;
369
370    /// ret (from a Call)
371    fn emit_ret(&mut self) -> Result<(), CompileError>;
372
373    /// Stack push of a location
374    fn emit_push(
375        &mut self,
376        size: Size,
377        loc: Location<Self::GPR, Self::SIMD>,
378    ) -> Result<(), CompileError>;
379    /// Stack pop of a location
380    fn emit_pop(
381        &mut self,
382        size: Size,
383        loc: Location<Self::GPR, Self::SIMD>,
384    ) -> Result<(), CompileError>;
385    /// relaxed mov: move from anywhere to anywhere
386    fn emit_relaxed_mov(
387        &mut self,
388        sz: Size,
389        src: Location<Self::GPR, Self::SIMD>,
390        dst: Location<Self::GPR, Self::SIMD>,
391    ) -> Result<(), CompileError>;
392    /// relaxed cmp: compare from anywhere and anywhere
393    fn emit_relaxed_cmp(
394        &mut self,
395        sz: Size,
396        src: Location<Self::GPR, Self::SIMD>,
397        dst: Location<Self::GPR, Self::SIMD>,
398    ) -> Result<(), CompileError>;
399    /// Emit a memory fence. Can be nothing for x86_64 or a DMB on ARM64 for example
400    fn emit_memory_fence(&mut self) -> Result<(), CompileError>;
401    /// relaxed move with sign extension
402    fn emit_relaxed_sign_extension(
403        &mut self,
404        sz_src: Size,
405        src: Location<Self::GPR, Self::SIMD>,
406        sz_dst: Size,
407        dst: Location<Self::GPR, Self::SIMD>,
408    ) -> Result<(), CompileError>;
409    /// Multiply location with immediate
410    fn emit_imul_imm32(
411        &mut self,
412        size: Size,
413        imm32: u32,
414        gpr: Self::GPR,
415    ) -> Result<(), CompileError>;
416    /// Add with location directly from the stack
417    fn emit_binop_add32(
418        &mut self,
419        loc_a: Location<Self::GPR, Self::SIMD>,
420        loc_b: Location<Self::GPR, Self::SIMD>,
421        ret: Location<Self::GPR, Self::SIMD>,
422    ) -> Result<(), CompileError>;
423    /// Sub with location directly from the stack
424    fn emit_binop_sub32(
425        &mut self,
426        loc_a: Location<Self::GPR, Self::SIMD>,
427        loc_b: Location<Self::GPR, Self::SIMD>,
428        ret: Location<Self::GPR, Self::SIMD>,
429    ) -> Result<(), CompileError>;
430    /// Multiply with location directly from the stack
431    fn emit_binop_mul32(
432        &mut self,
433        loc_a: Location<Self::GPR, Self::SIMD>,
434        loc_b: Location<Self::GPR, Self::SIMD>,
435        ret: Location<Self::GPR, Self::SIMD>,
436    ) -> Result<(), CompileError>;
437    /// Unsigned Division with location directly from the stack. return the offset of the DIV opcode, to mark as trappable.
438    fn emit_binop_udiv32(
439        &mut self,
440        loc_a: Location<Self::GPR, Self::SIMD>,
441        loc_b: Location<Self::GPR, Self::SIMD>,
442        ret: Location<Self::GPR, Self::SIMD>,
443        integer_division_by_zero: Label,
444    ) -> Result<usize, CompileError>;
445    /// Signed Division with location directly from the stack. return the offset of the DIV opcode, to mark as trappable.
446    fn emit_binop_sdiv32(
447        &mut self,
448        loc_a: Location<Self::GPR, Self::SIMD>,
449        loc_b: Location<Self::GPR, Self::SIMD>,
450        ret: Location<Self::GPR, Self::SIMD>,
451        integer_division_by_zero: Label,
452        integer_overflow: Label,
453    ) -> Result<usize, CompileError>;
    /// Unsigned Remainder (of a division) with location directly from the stack. Returns the offset of the DIV opcode, to mark as trappable.
    fn emit_binop_urem32(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
        integer_division_by_zero: Label,
    ) -> Result<usize, CompileError>;
    /// Signed Remainder (of a division) with location directly from the stack. Returns the offset of the DIV opcode, to mark as trappable.
    fn emit_binop_srem32(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
        integer_division_by_zero: Label,
    ) -> Result<usize, CompileError>;
470    /// And with location directly from the stack
471    fn emit_binop_and32(
472        &mut self,
473        loc_a: Location<Self::GPR, Self::SIMD>,
474        loc_b: Location<Self::GPR, Self::SIMD>,
475        ret: Location<Self::GPR, Self::SIMD>,
476    ) -> Result<(), CompileError>;
477    /// Or with location directly from the stack
478    fn emit_binop_or32(
479        &mut self,
480        loc_a: Location<Self::GPR, Self::SIMD>,
481        loc_b: Location<Self::GPR, Self::SIMD>,
482        ret: Location<Self::GPR, Self::SIMD>,
483    ) -> Result<(), CompileError>;
484    /// Xor with location directly from the stack
485    fn emit_binop_xor32(
486        &mut self,
487        loc_a: Location<Self::GPR, Self::SIMD>,
488        loc_b: Location<Self::GPR, Self::SIMD>,
489        ret: Location<Self::GPR, Self::SIMD>,
490    ) -> Result<(), CompileError>;
    /// Signed Greater or Equal Compare 2 i32, result in a GPR
    fn i32_cmp_ge_s(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Signed Greater Than Compare 2 i32, result in a GPR
    fn i32_cmp_gt_s(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Signed Less or Equal Compare 2 i32, result in a GPR
    fn i32_cmp_le_s(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Signed Less Than Compare 2 i32, result in a GPR
    fn i32_cmp_lt_s(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Unsigned Greater or Equal Compare 2 i32, result in a GPR
    fn i32_cmp_ge_u(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Unsigned Greater Than Compare 2 i32, result in a GPR
    fn i32_cmp_gt_u(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Unsigned Less or Equal Compare 2 i32, result in a GPR
    fn i32_cmp_le_u(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Unsigned Less Than Compare 2 i32, result in a GPR
    fn i32_cmp_lt_u(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
547    /// Not Equal Compare 2 i32, result in a GPR
548    fn i32_cmp_ne(
549        &mut self,
550        loc_a: Location<Self::GPR, Self::SIMD>,
551        loc_b: Location<Self::GPR, Self::SIMD>,
552        ret: Location<Self::GPR, Self::SIMD>,
553    ) -> Result<(), CompileError>;
554    /// Equal Compare 2 i32, result in a GPR
555    fn i32_cmp_eq(
556        &mut self,
557        loc_a: Location<Self::GPR, Self::SIMD>,
558        loc_b: Location<Self::GPR, Self::SIMD>,
559        ret: Location<Self::GPR, Self::SIMD>,
560    ) -> Result<(), CompileError>;
    /// Count Leading 0 bits of an i32
    fn i32_clz(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Count Trailing 0 bits of an i32
    fn i32_ctz(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
573    /// Count the number of 1 bit of an i32
574    fn i32_popcnt(
575        &mut self,
576        loc: Location<Self::GPR, Self::SIMD>,
577        ret: Location<Self::GPR, Self::SIMD>,
578    ) -> Result<(), CompileError>;
579    /// i32 Logical Shift Left
580    fn i32_shl(
581        &mut self,
582        loc_a: Location<Self::GPR, Self::SIMD>,
583        loc_b: Location<Self::GPR, Self::SIMD>,
584        ret: Location<Self::GPR, Self::SIMD>,
585    ) -> Result<(), CompileError>;
586    /// i32 Logical Shift Right
587    fn i32_shr(
588        &mut self,
589        loc_a: Location<Self::GPR, Self::SIMD>,
590        loc_b: Location<Self::GPR, Self::SIMD>,
591        ret: Location<Self::GPR, Self::SIMD>,
592    ) -> Result<(), CompileError>;
593    /// i32 Arithmetic Shift Right
594    fn i32_sar(
595        &mut self,
596        loc_a: Location<Self::GPR, Self::SIMD>,
597        loc_b: Location<Self::GPR, Self::SIMD>,
598        ret: Location<Self::GPR, Self::SIMD>,
599    ) -> Result<(), CompileError>;
    /// i32 Rotate Left (Wasm `i32.rotl`)
    fn i32_rol(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// i32 Rotate Right (Wasm `i32.rotr`)
    fn i32_ror(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
614    /// i32 load
615    #[allow(clippy::too_many_arguments)]
616    fn i32_load(
617        &mut self,
618        addr: Location<Self::GPR, Self::SIMD>,
619        memarg: &MemArg,
620        ret: Location<Self::GPR, Self::SIMD>,
621        need_check: bool,
622        imported_memories: bool,
623        offset: i32,
624        heap_access_oob: Label,
625        unaligned_atomic: Label,
626    ) -> Result<(), CompileError>;
627    /// i32 load of an unsigned 8bits
628    #[allow(clippy::too_many_arguments)]
629    fn i32_load_8u(
630        &mut self,
631        addr: Location<Self::GPR, Self::SIMD>,
632        memarg: &MemArg,
633        ret: Location<Self::GPR, Self::SIMD>,
634        need_check: bool,
635        imported_memories: bool,
636        offset: i32,
637        heap_access_oob: Label,
638        unaligned_atomic: Label,
639    ) -> Result<(), CompileError>;
640    /// i32 load of an signed 8bits
641    #[allow(clippy::too_many_arguments)]
642    fn i32_load_8s(
643        &mut self,
644        addr: Location<Self::GPR, Self::SIMD>,
645        memarg: &MemArg,
646        ret: Location<Self::GPR, Self::SIMD>,
647        need_check: bool,
648        imported_memories: bool,
649        offset: i32,
650        heap_access_oob: Label,
651        unaligned_atomic: Label,
652    ) -> Result<(), CompileError>;
653    /// i32 load of an unsigned 16bits
654    #[allow(clippy::too_many_arguments)]
655    fn i32_load_16u(
656        &mut self,
657        addr: Location<Self::GPR, Self::SIMD>,
658        memarg: &MemArg,
659        ret: Location<Self::GPR, Self::SIMD>,
660        need_check: bool,
661        imported_memories: bool,
662        offset: i32,
663        heap_access_oob: Label,
664        unaligned_atomic: Label,
665    ) -> Result<(), CompileError>;
666    /// i32 load of an signed 16bits
667    #[allow(clippy::too_many_arguments)]
668    fn i32_load_16s(
669        &mut self,
670        addr: Location<Self::GPR, Self::SIMD>,
671        memarg: &MemArg,
672        ret: Location<Self::GPR, Self::SIMD>,
673        need_check: bool,
674        imported_memories: bool,
675        offset: i32,
676        heap_access_oob: Label,
677        unaligned_atomic: Label,
678    ) -> Result<(), CompileError>;
679    /// i32 atomic load
680    #[allow(clippy::too_many_arguments)]
681    fn i32_atomic_load(
682        &mut self,
683        addr: Location<Self::GPR, Self::SIMD>,
684        memarg: &MemArg,
685        ret: Location<Self::GPR, Self::SIMD>,
686        need_check: bool,
687        imported_memories: bool,
688        offset: i32,
689        heap_access_oob: Label,
690        unaligned_atomic: Label,
691    ) -> Result<(), CompileError>;
692    /// i32 atomic load of an unsigned 8bits
693    #[allow(clippy::too_many_arguments)]
694    fn i32_atomic_load_8u(
695        &mut self,
696        addr: Location<Self::GPR, Self::SIMD>,
697        memarg: &MemArg,
698        ret: Location<Self::GPR, Self::SIMD>,
699        need_check: bool,
700        imported_memories: bool,
701        offset: i32,
702        heap_access_oob: Label,
703        unaligned_atomic: Label,
704    ) -> Result<(), CompileError>;
705    /// i32 atomic load of an unsigned 16bits
706    #[allow(clippy::too_many_arguments)]
707    fn i32_atomic_load_16u(
708        &mut self,
709        addr: Location<Self::GPR, Self::SIMD>,
710        memarg: &MemArg,
711        ret: Location<Self::GPR, Self::SIMD>,
712        need_check: bool,
713        imported_memories: bool,
714        offset: i32,
715        heap_access_oob: Label,
716        unaligned_atomic: Label,
717    ) -> Result<(), CompileError>;
718    /// i32 save
719    #[allow(clippy::too_many_arguments)]
720    fn i32_save(
721        &mut self,
722        value: Location<Self::GPR, Self::SIMD>,
723        memarg: &MemArg,
724        addr: Location<Self::GPR, Self::SIMD>,
725        need_check: bool,
726        imported_memories: bool,
727        offset: i32,
728        heap_access_oob: Label,
729        unaligned_atomic: Label,
730    ) -> Result<(), CompileError>;
731    /// i32 save of the lower 8bits
732    #[allow(clippy::too_many_arguments)]
733    fn i32_save_8(
734        &mut self,
735        value: Location<Self::GPR, Self::SIMD>,
736        memarg: &MemArg,
737        addr: Location<Self::GPR, Self::SIMD>,
738        need_check: bool,
739        imported_memories: bool,
740        offset: i32,
741        heap_access_oob: Label,
742        unaligned_atomic: Label,
743    ) -> Result<(), CompileError>;
744    /// i32 save of the lower 16bits
745    #[allow(clippy::too_many_arguments)]
746    fn i32_save_16(
747        &mut self,
748        value: Location<Self::GPR, Self::SIMD>,
749        memarg: &MemArg,
750        addr: Location<Self::GPR, Self::SIMD>,
751        need_check: bool,
752        imported_memories: bool,
753        offset: i32,
754        heap_access_oob: Label,
755        unaligned_atomic: Label,
756    ) -> Result<(), CompileError>;
757    /// i32 atomic save
758    #[allow(clippy::too_many_arguments)]
759    fn i32_atomic_save(
760        &mut self,
761        value: Location<Self::GPR, Self::SIMD>,
762        memarg: &MemArg,
763        addr: Location<Self::GPR, Self::SIMD>,
764        need_check: bool,
765        imported_memories: bool,
766        offset: i32,
767        heap_access_oob: Label,
768        unaligned_atomic: Label,
769    ) -> Result<(), CompileError>;
    /// i32 atomic save of the lower 8 bits
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_save_8(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic save of the lower 16 bits
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_save_16(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
796    /// i32 atomic Add with i32
797    #[allow(clippy::too_many_arguments)]
798    fn i32_atomic_add(
799        &mut self,
800        loc: Location<Self::GPR, Self::SIMD>,
801        target: Location<Self::GPR, Self::SIMD>,
802        memarg: &MemArg,
803        ret: Location<Self::GPR, Self::SIMD>,
804        need_check: bool,
805        imported_memories: bool,
806        offset: i32,
807        heap_access_oob: Label,
808        unaligned_atomic: Label,
809    ) -> Result<(), CompileError>;
810    /// i32 atomic Add with unsigned 8bits
811    #[allow(clippy::too_many_arguments)]
812    fn i32_atomic_add_8u(
813        &mut self,
814        loc: Location<Self::GPR, Self::SIMD>,
815        target: Location<Self::GPR, Self::SIMD>,
816        memarg: &MemArg,
817        ret: Location<Self::GPR, Self::SIMD>,
818        need_check: bool,
819        imported_memories: bool,
820        offset: i32,
821        heap_access_oob: Label,
822        unaligned_atomic: Label,
823    ) -> Result<(), CompileError>;
824    /// i32 atomic Add with unsigned 16bits
825    #[allow(clippy::too_many_arguments)]
826    fn i32_atomic_add_16u(
827        &mut self,
828        loc: Location<Self::GPR, Self::SIMD>,
829        target: Location<Self::GPR, Self::SIMD>,
830        memarg: &MemArg,
831        ret: Location<Self::GPR, Self::SIMD>,
832        need_check: bool,
833        imported_memories: bool,
834        offset: i32,
835        heap_access_oob: Label,
836        unaligned_atomic: Label,
837    ) -> Result<(), CompileError>;
    /// i32 atomic Sub with i32
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_sub(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Sub with unsigned 8 bits
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_sub_8u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Sub with unsigned 16 bits
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_sub_16u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic And with i32
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_and(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic And with unsigned 8 bits
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_and_8u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic And with unsigned 16 bits
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_and_16u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Or with i32
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_or(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Or with unsigned 8 bits
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_or_8u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Or with unsigned 16 bits
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_or_16u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Xor with i32
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_xor(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Xor with unsigned 8 bits
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_xor_8u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Xor with unsigned 16 bits
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_xor_16u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Exchange with i32
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_xchg(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Exchange with u8
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_xchg_8u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Exchange with u16
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_xchg_16u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Compare and Exchange with i32
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_cmpxchg(
        &mut self,
        new: Location<Self::GPR, Self::SIMD>,
        cmp: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Compare and Exchange with u8
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_cmpxchg_8u(
        &mut self,
        new: Location<Self::GPR, Self::SIMD>,
        cmp: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Compare and Exchange with u16
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_cmpxchg_16u(
        &mut self,
        new: Location<Self::GPR, Self::SIMD>,
        cmp: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
1093
    /// Emit a move of a function address to a GPR ready for call, using the appropriate relocation
    fn emit_call_with_reloc(
        &mut self,
        calling_convention: CallingConvention,
        reloc_target: RelocationTarget,
    ) -> Result<Vec<Relocation>, CompileError>;
    /// Add with location directly from the stack
    fn emit_binop_add64(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Sub with location directly from the stack
    fn emit_binop_sub64(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Multiply with location directly from the stack
    fn emit_binop_mul64(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Unsigned Division with location directly from the stack. Returns the offset of the DIV opcode, to mark as trappable.
    fn emit_binop_udiv64(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
        integer_division_by_zero: Label,
    ) -> Result<usize, CompileError>;
    /// Signed Division with location directly from the stack. Returns the offset of the DIV opcode, to mark as trappable.
    fn emit_binop_sdiv64(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
        integer_division_by_zero: Label,
        integer_overflow: Label,
    ) -> Result<usize, CompileError>;
    /// Unsigned Remainder (of a division) with location directly from the stack. Returns the offset of the DIV opcode, to mark as trappable.
    fn emit_binop_urem64(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
        integer_division_by_zero: Label,
    ) -> Result<usize, CompileError>;
    /// Signed Remainder (of a division) with location directly from the stack. Returns the offset of the DIV opcode, to mark as trappable.
    fn emit_binop_srem64(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
        integer_division_by_zero: Label,
    ) -> Result<usize, CompileError>;
    /// And with location directly from the stack
    fn emit_binop_and64(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Or with location directly from the stack
    fn emit_binop_or64(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Xor with location directly from the stack
    fn emit_binop_xor64(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Signed Greater or Equal Compare 2 i64, result in a GPR
    fn i64_cmp_ge_s(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Signed Greater Than Compare 2 i64, result in a GPR
    fn i64_cmp_gt_s(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Signed Less or Equal Compare 2 i64, result in a GPR
    fn i64_cmp_le_s(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Signed Less Than Compare 2 i64, result in a GPR
    fn i64_cmp_lt_s(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Unsigned Greater or Equal Compare 2 i64, result in a GPR
    fn i64_cmp_ge_u(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Unsigned Greater Than Compare 2 i64, result in a GPR
    fn i64_cmp_gt_u(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Unsigned Less or Equal Compare 2 i64, result in a GPR
    fn i64_cmp_le_u(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Unsigned Less Than Compare 2 i64, result in a GPR
    fn i64_cmp_lt_u(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Not Equal Compare 2 i64, result in a GPR
    fn i64_cmp_ne(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Equal Compare 2 i64, result in a GPR
    fn i64_cmp_eq(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Count Leading 0 bits of an i64
    fn i64_clz(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Count Trailing 0 bits of an i64
    fn i64_ctz(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Count the number of 1 bits of an i64
    fn i64_popcnt(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// i64 Logical Shift Left
    fn i64_shl(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// i64 Logical Shift Right
    fn i64_shr(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// i64 Arithmetic Shift Right
    fn i64_sar(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// i64 Rotate Left
    fn i64_rol(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// i64 Rotate Right
    fn i64_ror(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// i64 load
    #[allow(clippy::too_many_arguments)]
    fn i64_load(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 load of an unsigned 8 bits
    #[allow(clippy::too_many_arguments)]
    fn i64_load_8u(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 load of a signed 8 bits
    #[allow(clippy::too_many_arguments)]
    fn i64_load_8s(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 load of an unsigned 32 bits
    #[allow(clippy::too_many_arguments)]
    fn i64_load_32u(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 load of a signed 32 bits
    #[allow(clippy::too_many_arguments)]
    fn i64_load_32s(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 load of an unsigned 16 bits
    #[allow(clippy::too_many_arguments)]
    fn i64_load_16u(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 load of a signed 16 bits
    #[allow(clippy::too_many_arguments)]
    fn i64_load_16s(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic load
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_load(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic load from unsigned 8 bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_load_8u(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic load from unsigned 16 bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_load_16u(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic load from unsigned 32 bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_load_32u(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 save
    #[allow(clippy::too_many_arguments)]
    fn i64_save(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 save of the lower 8 bits
    #[allow(clippy::too_many_arguments)]
    fn i64_save_8(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 save of the lower 16 bits
    #[allow(clippy::too_many_arguments)]
    fn i64_save_16(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 save of the lower 32 bits
    #[allow(clippy::too_many_arguments)]
    fn i64_save_32(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic save
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_save(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic save of the lower 8 bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_save_8(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic save of the lower 16 bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_save_16(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic save of the lower 32 bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_save_32(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic Add with i64
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_add(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic Add with unsigned 8 bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_add_8u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic Add with unsigned 16 bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_add_16u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic Add with unsigned 32 bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_add_32u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic Sub with i64
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_sub(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic Sub with unsigned 8 bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_sub_8u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic Sub with unsigned 16 bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_sub_16u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic Sub with unsigned 32 bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_sub_32u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic And with i64
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_and(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic And with unsigned 8 bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_and_8u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic And with unsigned 16 bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_and_16u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
1699    /// i64 atomic And with unsigned 32bits
1700    #[allow(clippy::too_many_arguments)]
1701    fn i64_atomic_and_32u(
1702        &mut self,
1703        loc: Location<Self::GPR, Self::SIMD>,
1704        target: Location<Self::GPR, Self::SIMD>,
1705        memarg: &MemArg,
1706        ret: Location<Self::GPR, Self::SIMD>,
1707        need_check: bool,
1708        imported_memories: bool,
1709        offset: i32,
1710        heap_access_oob: Label,
1711        unaligned_atomic: Label,
1712    ) -> Result<(), CompileError>;
1713    /// i64 atomic Or with i64
1714    #[allow(clippy::too_many_arguments)]
1715    fn i64_atomic_or(
1716        &mut self,
1717        loc: Location<Self::GPR, Self::SIMD>,
1718        target: Location<Self::GPR, Self::SIMD>,
1719        memarg: &MemArg,
1720        ret: Location<Self::GPR, Self::SIMD>,
1721        need_check: bool,
1722        imported_memories: bool,
1723        offset: i32,
1724        heap_access_oob: Label,
1725        unaligned_atomic: Label,
1726    ) -> Result<(), CompileError>;
1727    /// i64 atomic Or with unsigned 8bits
1728    #[allow(clippy::too_many_arguments)]
1729    fn i64_atomic_or_8u(
1730        &mut self,
1731        loc: Location<Self::GPR, Self::SIMD>,
1732        target: Location<Self::GPR, Self::SIMD>,
1733        memarg: &MemArg,
1734        ret: Location<Self::GPR, Self::SIMD>,
1735        need_check: bool,
1736        imported_memories: bool,
1737        offset: i32,
1738        heap_access_oob: Label,
1739        unaligned_atomic: Label,
1740    ) -> Result<(), CompileError>;
1741    /// i64 atomic Or with unsigned 16bits
1742    #[allow(clippy::too_many_arguments)]
1743    fn i64_atomic_or_16u(
1744        &mut self,
1745        loc: Location<Self::GPR, Self::SIMD>,
1746        target: Location<Self::GPR, Self::SIMD>,
1747        memarg: &MemArg,
1748        ret: Location<Self::GPR, Self::SIMD>,
1749        need_check: bool,
1750        imported_memories: bool,
1751        offset: i32,
1752        heap_access_oob: Label,
1753        unaligned_atomic: Label,
1754    ) -> Result<(), CompileError>;
1755    /// i64 atomic Or with unsigned 32bits
1756    #[allow(clippy::too_many_arguments)]
1757    fn i64_atomic_or_32u(
1758        &mut self,
1759        loc: Location<Self::GPR, Self::SIMD>,
1760        target: Location<Self::GPR, Self::SIMD>,
1761        memarg: &MemArg,
1762        ret: Location<Self::GPR, Self::SIMD>,
1763        need_check: bool,
1764        imported_memories: bool,
1765        offset: i32,
1766        heap_access_oob: Label,
1767        unaligned_atomic: Label,
1768    ) -> Result<(), CompileError>;
1769    /// i64 atomic Xor with i64
1770    #[allow(clippy::too_many_arguments)]
1771    fn i64_atomic_xor(
1772        &mut self,
1773        loc: Location<Self::GPR, Self::SIMD>,
1774        target: Location<Self::GPR, Self::SIMD>,
1775        memarg: &MemArg,
1776        ret: Location<Self::GPR, Self::SIMD>,
1777        need_check: bool,
1778        imported_memories: bool,
1779        offset: i32,
1780        heap_access_oob: Label,
1781        unaligned_atomic: Label,
1782    ) -> Result<(), CompileError>;
1783    /// i64 atomic Xor with unsigned 8bits
1784    #[allow(clippy::too_many_arguments)]
1785    fn i64_atomic_xor_8u(
1786        &mut self,
1787        loc: Location<Self::GPR, Self::SIMD>,
1788        target: Location<Self::GPR, Self::SIMD>,
1789        memarg: &MemArg,
1790        ret: Location<Self::GPR, Self::SIMD>,
1791        need_check: bool,
1792        imported_memories: bool,
1793        offset: i32,
1794        heap_access_oob: Label,
1795        unaligned_atomic: Label,
1796    ) -> Result<(), CompileError>;
1797    /// i64 atomic Xor with unsigned 16bits
1798    #[allow(clippy::too_many_arguments)]
1799    fn i64_atomic_xor_16u(
1800        &mut self,
1801        loc: Location<Self::GPR, Self::SIMD>,
1802        target: Location<Self::GPR, Self::SIMD>,
1803        memarg: &MemArg,
1804        ret: Location<Self::GPR, Self::SIMD>,
1805        need_check: bool,
1806        imported_memories: bool,
1807        offset: i32,
1808        heap_access_oob: Label,
1809        unaligned_atomic: Label,
1810    ) -> Result<(), CompileError>;
1811    /// i64 atomic Xor with unsigned 32bits
1812    #[allow(clippy::too_many_arguments)]
1813    fn i64_atomic_xor_32u(
1814        &mut self,
1815        loc: Location<Self::GPR, Self::SIMD>,
1816        target: Location<Self::GPR, Self::SIMD>,
1817        memarg: &MemArg,
1818        ret: Location<Self::GPR, Self::SIMD>,
1819        need_check: bool,
1820        imported_memories: bool,
1821        offset: i32,
1822        heap_access_oob: Label,
1823        unaligned_atomic: Label,
1824    ) -> Result<(), CompileError>;
1825    /// i64 atomic Exchange with i64
1826    #[allow(clippy::too_many_arguments)]
1827    fn i64_atomic_xchg(
1828        &mut self,
1829        loc: Location<Self::GPR, Self::SIMD>,
1830        target: Location<Self::GPR, Self::SIMD>,
1831        memarg: &MemArg,
1832        ret: Location<Self::GPR, Self::SIMD>,
1833        need_check: bool,
1834        imported_memories: bool,
1835        offset: i32,
1836        heap_access_oob: Label,
1837        unaligned_atomic: Label,
1838    ) -> Result<(), CompileError>;
1839    /// i64 atomic Exchange with u8
1840    #[allow(clippy::too_many_arguments)]
1841    fn i64_atomic_xchg_8u(
1842        &mut self,
1843        loc: Location<Self::GPR, Self::SIMD>,
1844        target: Location<Self::GPR, Self::SIMD>,
1845        memarg: &MemArg,
1846        ret: Location<Self::GPR, Self::SIMD>,
1847        need_check: bool,
1848        imported_memories: bool,
1849        offset: i32,
1850        heap_access_oob: Label,
1851        unaligned_atomic: Label,
1852    ) -> Result<(), CompileError>;
1853    /// i64 atomic Exchange with u16
1854    #[allow(clippy::too_many_arguments)]
1855    fn i64_atomic_xchg_16u(
1856        &mut self,
1857        loc: Location<Self::GPR, Self::SIMD>,
1858        target: Location<Self::GPR, Self::SIMD>,
1859        memarg: &MemArg,
1860        ret: Location<Self::GPR, Self::SIMD>,
1861        need_check: bool,
1862        imported_memories: bool,
1863        offset: i32,
1864        heap_access_oob: Label,
1865        unaligned_atomic: Label,
1866    ) -> Result<(), CompileError>;
1867    /// i64 atomic Exchange with u32
1868    #[allow(clippy::too_many_arguments)]
1869    fn i64_atomic_xchg_32u(
1870        &mut self,
1871        loc: Location<Self::GPR, Self::SIMD>,
1872        target: Location<Self::GPR, Self::SIMD>,
1873        memarg: &MemArg,
1874        ret: Location<Self::GPR, Self::SIMD>,
1875        need_check: bool,
1876        imported_memories: bool,
1877        offset: i32,
1878        heap_access_oob: Label,
1879        unaligned_atomic: Label,
1880    ) -> Result<(), CompileError>;
    /// i64 atomic Compare and Exchange with i64
1882    #[allow(clippy::too_many_arguments)]
1883    fn i64_atomic_cmpxchg(
1884        &mut self,
1885        new: Location<Self::GPR, Self::SIMD>,
1886        cmp: Location<Self::GPR, Self::SIMD>,
1887        target: Location<Self::GPR, Self::SIMD>,
1888        memarg: &MemArg,
1889        ret: Location<Self::GPR, Self::SIMD>,
1890        need_check: bool,
1891        imported_memories: bool,
1892        offset: i32,
1893        heap_access_oob: Label,
1894        unaligned_atomic: Label,
1895    ) -> Result<(), CompileError>;
1896    /// i64 atomic Compare and Exchange with u8
1897    #[allow(clippy::too_many_arguments)]
1898    fn i64_atomic_cmpxchg_8u(
1899        &mut self,
1900        new: Location<Self::GPR, Self::SIMD>,
1901        cmp: Location<Self::GPR, Self::SIMD>,
1902        target: Location<Self::GPR, Self::SIMD>,
1903        memarg: &MemArg,
1904        ret: Location<Self::GPR, Self::SIMD>,
1905        need_check: bool,
1906        imported_memories: bool,
1907        offset: i32,
1908        heap_access_oob: Label,
1909        unaligned_atomic: Label,
1910    ) -> Result<(), CompileError>;
1911    /// i64 atomic Compare and Exchange with u16
1912    #[allow(clippy::too_many_arguments)]
1913    fn i64_atomic_cmpxchg_16u(
1914        &mut self,
1915        new: Location<Self::GPR, Self::SIMD>,
1916        cmp: Location<Self::GPR, Self::SIMD>,
1917        target: Location<Self::GPR, Self::SIMD>,
1918        memarg: &MemArg,
1919        ret: Location<Self::GPR, Self::SIMD>,
1920        need_check: bool,
1921        imported_memories: bool,
1922        offset: i32,
1923        heap_access_oob: Label,
1924        unaligned_atomic: Label,
1925    ) -> Result<(), CompileError>;
1926    /// i64 atomic Compare and Exchange with u32
1927    #[allow(clippy::too_many_arguments)]
1928    fn i64_atomic_cmpxchg_32u(
1929        &mut self,
1930        new: Location<Self::GPR, Self::SIMD>,
1931        cmp: Location<Self::GPR, Self::SIMD>,
1932        target: Location<Self::GPR, Self::SIMD>,
1933        memarg: &MemArg,
1934        ret: Location<Self::GPR, Self::SIMD>,
1935        need_check: bool,
1936        imported_memories: bool,
1937        offset: i32,
1938        heap_access_oob: Label,
1939        unaligned_atomic: Label,
1940    ) -> Result<(), CompileError>;
1941
1942    /// load an F32
1943    #[allow(clippy::too_many_arguments)]
1944    fn f32_load(
1945        &mut self,
1946        addr: Location<Self::GPR, Self::SIMD>,
1947        memarg: &MemArg,
1948        ret: Location<Self::GPR, Self::SIMD>,
1949        need_check: bool,
1950        imported_memories: bool,
1951        offset: i32,
1952        heap_access_oob: Label,
1953        unaligned_atomic: Label,
1954    ) -> Result<(), CompileError>;
1955    /// f32 save
1956    #[allow(clippy::too_many_arguments)]
1957    fn f32_save(
1958        &mut self,
1959        value: Location<Self::GPR, Self::SIMD>,
1960        memarg: &MemArg,
1961        addr: Location<Self::GPR, Self::SIMD>,
1962        canonicalize: bool,
1963        need_check: bool,
1964        imported_memories: bool,
1965        offset: i32,
1966        heap_access_oob: Label,
1967        unaligned_atomic: Label,
1968    ) -> Result<(), CompileError>;
1969    /// load an F64
1970    #[allow(clippy::too_many_arguments)]
1971    fn f64_load(
1972        &mut self,
1973        addr: Location<Self::GPR, Self::SIMD>,
1974        memarg: &MemArg,
1975        ret: Location<Self::GPR, Self::SIMD>,
1976        need_check: bool,
1977        imported_memories: bool,
1978        offset: i32,
1979        heap_access_oob: Label,
1980        unaligned_atomic: Label,
1981    ) -> Result<(), CompileError>;
1982    /// f64 save
1983    #[allow(clippy::too_many_arguments)]
1984    fn f64_save(
1985        &mut self,
1986        value: Location<Self::GPR, Self::SIMD>,
1987        memarg: &MemArg,
1988        addr: Location<Self::GPR, Self::SIMD>,
1989        canonicalize: bool,
1990        need_check: bool,
1991        imported_memories: bool,
1992        offset: i32,
1993        heap_access_oob: Label,
1994        unaligned_atomic: Label,
1995    ) -> Result<(), CompileError>;
1996    /// Convert a F64 from I64, signed or unsigned
1997    fn convert_f64_i64(
1998        &mut self,
1999        loc: Location<Self::GPR, Self::SIMD>,
2000        signed: bool,
2001        ret: Location<Self::GPR, Self::SIMD>,
2002    ) -> Result<(), CompileError>;
2003    /// Convert a F64 from I32, signed or unsigned
2004    fn convert_f64_i32(
2005        &mut self,
2006        loc: Location<Self::GPR, Self::SIMD>,
2007        signed: bool,
2008        ret: Location<Self::GPR, Self::SIMD>,
2009    ) -> Result<(), CompileError>;
2010    /// Convert a F32 from I64, signed or unsigned
2011    fn convert_f32_i64(
2012        &mut self,
2013        loc: Location<Self::GPR, Self::SIMD>,
2014        signed: bool,
2015        ret: Location<Self::GPR, Self::SIMD>,
2016    ) -> Result<(), CompileError>;
2017    /// Convert a F32 from I32, signed or unsigned
2018    fn convert_f32_i32(
2019        &mut self,
2020        loc: Location<Self::GPR, Self::SIMD>,
2021        signed: bool,
2022        ret: Location<Self::GPR, Self::SIMD>,
2023    ) -> Result<(), CompileError>;
    /// Convert a F64 to I64, signed or unsigned, with or without saturation
2025    fn convert_i64_f64(
2026        &mut self,
2027        loc: Location<Self::GPR, Self::SIMD>,
2028        ret: Location<Self::GPR, Self::SIMD>,
2029        signed: bool,
2030        sat: bool,
2031    ) -> Result<(), CompileError>;
    /// Convert a F64 to I32, signed or unsigned, with or without saturation
2033    fn convert_i32_f64(
2034        &mut self,
2035        loc: Location<Self::GPR, Self::SIMD>,
2036        ret: Location<Self::GPR, Self::SIMD>,
2037        signed: bool,
2038        sat: bool,
2039    ) -> Result<(), CompileError>;
    /// Convert a F32 to I64, signed or unsigned, with or without saturation
2041    fn convert_i64_f32(
2042        &mut self,
2043        loc: Location<Self::GPR, Self::SIMD>,
2044        ret: Location<Self::GPR, Self::SIMD>,
2045        signed: bool,
2046        sat: bool,
2047    ) -> Result<(), CompileError>;
    /// Convert a F32 to I32, signed or unsigned, with or without saturation
2049    fn convert_i32_f32(
2050        &mut self,
2051        loc: Location<Self::GPR, Self::SIMD>,
2052        ret: Location<Self::GPR, Self::SIMD>,
2053        signed: bool,
2054        sat: bool,
2055    ) -> Result<(), CompileError>;
2056    /// Convert a F32 to F64
2057    fn convert_f64_f32(
2058        &mut self,
2059        loc: Location<Self::GPR, Self::SIMD>,
2060        ret: Location<Self::GPR, Self::SIMD>,
2061    ) -> Result<(), CompileError>;
2062    /// Convert a F64 to F32
2063    fn convert_f32_f64(
2064        &mut self,
2065        loc: Location<Self::GPR, Self::SIMD>,
2066        ret: Location<Self::GPR, Self::SIMD>,
2067    ) -> Result<(), CompileError>;
2068    /// Negate an F64
2069    fn f64_neg(
2070        &mut self,
2071        loc: Location<Self::GPR, Self::SIMD>,
2072        ret: Location<Self::GPR, Self::SIMD>,
2073    ) -> Result<(), CompileError>;
2074    /// Get the Absolute Value of an F64
2075    fn f64_abs(
2076        &mut self,
2077        loc: Location<Self::GPR, Self::SIMD>,
2078        ret: Location<Self::GPR, Self::SIMD>,
2079    ) -> Result<(), CompileError>;
2080    /// Copy sign from tmp1 Self::GPR to tmp2 Self::GPR
2081    fn emit_i64_copysign(&mut self, tmp1: Self::GPR, tmp2: Self::GPR) -> Result<(), CompileError>;
2082    /// Get the Square Root of an F64
2083    fn f64_sqrt(
2084        &mut self,
2085        loc: Location<Self::GPR, Self::SIMD>,
2086        ret: Location<Self::GPR, Self::SIMD>,
2087    ) -> Result<(), CompileError>;
2088    /// Trunc of an F64
2089    fn f64_trunc(
2090        &mut self,
2091        loc: Location<Self::GPR, Self::SIMD>,
2092        ret: Location<Self::GPR, Self::SIMD>,
2093    ) -> Result<(), CompileError>;
2094    /// Ceil of an F64
2095    fn f64_ceil(
2096        &mut self,
2097        loc: Location<Self::GPR, Self::SIMD>,
2098        ret: Location<Self::GPR, Self::SIMD>,
2099    ) -> Result<(), CompileError>;
2100    /// Floor of an F64
2101    fn f64_floor(
2102        &mut self,
2103        loc: Location<Self::GPR, Self::SIMD>,
2104        ret: Location<Self::GPR, Self::SIMD>,
2105    ) -> Result<(), CompileError>;
2106    /// Round at nearest int of an F64
2107    fn f64_nearest(
2108        &mut self,
2109        loc: Location<Self::GPR, Self::SIMD>,
2110        ret: Location<Self::GPR, Self::SIMD>,
2111    ) -> Result<(), CompileError>;
    /// Greater or Equal Compare 2 F64, result in a GPR
2113    fn f64_cmp_ge(
2114        &mut self,
2115        loc_a: Location<Self::GPR, Self::SIMD>,
2116        loc_b: Location<Self::GPR, Self::SIMD>,
2117        ret: Location<Self::GPR, Self::SIMD>,
2118    ) -> Result<(), CompileError>;
2119    /// Greater Than Compare 2 F64, result in a GPR
2120    fn f64_cmp_gt(
2121        &mut self,
2122        loc_a: Location<Self::GPR, Self::SIMD>,
2123        loc_b: Location<Self::GPR, Self::SIMD>,
2124        ret: Location<Self::GPR, Self::SIMD>,
2125    ) -> Result<(), CompileError>;
    /// Less or Equal Compare 2 F64, result in a GPR
2127    fn f64_cmp_le(
2128        &mut self,
2129        loc_a: Location<Self::GPR, Self::SIMD>,
2130        loc_b: Location<Self::GPR, Self::SIMD>,
2131        ret: Location<Self::GPR, Self::SIMD>,
2132    ) -> Result<(), CompileError>;
2133    /// Less Than Compare 2 F64, result in a GPR
2134    fn f64_cmp_lt(
2135        &mut self,
2136        loc_a: Location<Self::GPR, Self::SIMD>,
2137        loc_b: Location<Self::GPR, Self::SIMD>,
2138        ret: Location<Self::GPR, Self::SIMD>,
2139    ) -> Result<(), CompileError>;
2140    /// Not Equal Compare 2 F64, result in a GPR
2141    fn f64_cmp_ne(
2142        &mut self,
2143        loc_a: Location<Self::GPR, Self::SIMD>,
2144        loc_b: Location<Self::GPR, Self::SIMD>,
2145        ret: Location<Self::GPR, Self::SIMD>,
2146    ) -> Result<(), CompileError>;
2147    /// Equal Compare 2 F64, result in a GPR
2148    fn f64_cmp_eq(
2149        &mut self,
2150        loc_a: Location<Self::GPR, Self::SIMD>,
2151        loc_b: Location<Self::GPR, Self::SIMD>,
2152        ret: Location<Self::GPR, Self::SIMD>,
2153    ) -> Result<(), CompileError>;
2154    /// get Min for 2 F64 values
2155    fn f64_min(
2156        &mut self,
2157        loc_a: Location<Self::GPR, Self::SIMD>,
2158        loc_b: Location<Self::GPR, Self::SIMD>,
2159        ret: Location<Self::GPR, Self::SIMD>,
2160    ) -> Result<(), CompileError>;
2161    /// get Max for 2 F64 values
2162    fn f64_max(
2163        &mut self,
2164        loc_a: Location<Self::GPR, Self::SIMD>,
2165        loc_b: Location<Self::GPR, Self::SIMD>,
2166        ret: Location<Self::GPR, Self::SIMD>,
2167    ) -> Result<(), CompileError>;
2168    /// Add 2 F64 values
2169    fn f64_add(
2170        &mut self,
2171        loc_a: Location<Self::GPR, Self::SIMD>,
2172        loc_b: Location<Self::GPR, Self::SIMD>,
2173        ret: Location<Self::GPR, Self::SIMD>,
2174    ) -> Result<(), CompileError>;
2175    /// Sub 2 F64 values
2176    fn f64_sub(
2177        &mut self,
2178        loc_a: Location<Self::GPR, Self::SIMD>,
2179        loc_b: Location<Self::GPR, Self::SIMD>,
2180        ret: Location<Self::GPR, Self::SIMD>,
2181    ) -> Result<(), CompileError>;
2182    /// Multiply 2 F64 values
2183    fn f64_mul(
2184        &mut self,
2185        loc_a: Location<Self::GPR, Self::SIMD>,
2186        loc_b: Location<Self::GPR, Self::SIMD>,
2187        ret: Location<Self::GPR, Self::SIMD>,
2188    ) -> Result<(), CompileError>;
2189    /// Divide 2 F64 values
2190    fn f64_div(
2191        &mut self,
2192        loc_a: Location<Self::GPR, Self::SIMD>,
2193        loc_b: Location<Self::GPR, Self::SIMD>,
2194        ret: Location<Self::GPR, Self::SIMD>,
2195    ) -> Result<(), CompileError>;
2196    /// Negate an F32
2197    fn f32_neg(
2198        &mut self,
2199        loc: Location<Self::GPR, Self::SIMD>,
2200        ret: Location<Self::GPR, Self::SIMD>,
2201    ) -> Result<(), CompileError>;
2202    /// Get the Absolute Value of an F32
2203    fn f32_abs(
2204        &mut self,
2205        loc: Location<Self::GPR, Self::SIMD>,
2206        ret: Location<Self::GPR, Self::SIMD>,
2207    ) -> Result<(), CompileError>;
2208    /// Copy sign from tmp1 Self::GPR to tmp2 Self::GPR
2209    fn emit_i32_copysign(&mut self, tmp1: Self::GPR, tmp2: Self::GPR) -> Result<(), CompileError>;
2210    /// Get the Square Root of an F32
2211    fn f32_sqrt(
2212        &mut self,
2213        loc: Location<Self::GPR, Self::SIMD>,
2214        ret: Location<Self::GPR, Self::SIMD>,
2215    ) -> Result<(), CompileError>;
2216    /// Trunc of an F32
2217    fn f32_trunc(
2218        &mut self,
2219        loc: Location<Self::GPR, Self::SIMD>,
2220        ret: Location<Self::GPR, Self::SIMD>,
2221    ) -> Result<(), CompileError>;
2222    /// Ceil of an F32
2223    fn f32_ceil(
2224        &mut self,
2225        loc: Location<Self::GPR, Self::SIMD>,
2226        ret: Location<Self::GPR, Self::SIMD>,
2227    ) -> Result<(), CompileError>;
2228    /// Floor of an F32
2229    fn f32_floor(
2230        &mut self,
2231        loc: Location<Self::GPR, Self::SIMD>,
2232        ret: Location<Self::GPR, Self::SIMD>,
2233    ) -> Result<(), CompileError>;
2234    /// Round at nearest int of an F32
2235    fn f32_nearest(
2236        &mut self,
2237        loc: Location<Self::GPR, Self::SIMD>,
2238        ret: Location<Self::GPR, Self::SIMD>,
2239    ) -> Result<(), CompileError>;
    /// Greater or Equal Compare 2 F32, result in a GPR
2241    fn f32_cmp_ge(
2242        &mut self,
2243        loc_a: Location<Self::GPR, Self::SIMD>,
2244        loc_b: Location<Self::GPR, Self::SIMD>,
2245        ret: Location<Self::GPR, Self::SIMD>,
2246    ) -> Result<(), CompileError>;
2247    /// Greater Than Compare 2 F32, result in a GPR
2248    fn f32_cmp_gt(
2249        &mut self,
2250        loc_a: Location<Self::GPR, Self::SIMD>,
2251        loc_b: Location<Self::GPR, Self::SIMD>,
2252        ret: Location<Self::GPR, Self::SIMD>,
2253    ) -> Result<(), CompileError>;
    /// Less or Equal Compare 2 F32, result in a GPR
2255    fn f32_cmp_le(
2256        &mut self,
2257        loc_a: Location<Self::GPR, Self::SIMD>,
2258        loc_b: Location<Self::GPR, Self::SIMD>,
2259        ret: Location<Self::GPR, Self::SIMD>,
2260    ) -> Result<(), CompileError>;
2261    /// Less Than Compare 2 F32, result in a GPR
2262    fn f32_cmp_lt(
2263        &mut self,
2264        loc_a: Location<Self::GPR, Self::SIMD>,
2265        loc_b: Location<Self::GPR, Self::SIMD>,
2266        ret: Location<Self::GPR, Self::SIMD>,
2267    ) -> Result<(), CompileError>;
2268    /// Not Equal Compare 2 F32, result in a GPR
2269    fn f32_cmp_ne(
2270        &mut self,
2271        loc_a: Location<Self::GPR, Self::SIMD>,
2272        loc_b: Location<Self::GPR, Self::SIMD>,
2273        ret: Location<Self::GPR, Self::SIMD>,
2274    ) -> Result<(), CompileError>;
2275    /// Equal Compare 2 F32, result in a GPR
2276    fn f32_cmp_eq(
2277        &mut self,
2278        loc_a: Location<Self::GPR, Self::SIMD>,
2279        loc_b: Location<Self::GPR, Self::SIMD>,
2280        ret: Location<Self::GPR, Self::SIMD>,
2281    ) -> Result<(), CompileError>;
2282    /// get Min for 2 F32 values
2283    fn f32_min(
2284        &mut self,
2285        loc_a: Location<Self::GPR, Self::SIMD>,
2286        loc_b: Location<Self::GPR, Self::SIMD>,
2287        ret: Location<Self::GPR, Self::SIMD>,
2288    ) -> Result<(), CompileError>;
2289    /// get Max for 2 F32 values
2290    fn f32_max(
2291        &mut self,
2292        loc_a: Location<Self::GPR, Self::SIMD>,
2293        loc_b: Location<Self::GPR, Self::SIMD>,
2294        ret: Location<Self::GPR, Self::SIMD>,
2295    ) -> Result<(), CompileError>;
2296    /// Add 2 F32 values
2297    fn f32_add(
2298        &mut self,
2299        loc_a: Location<Self::GPR, Self::SIMD>,
2300        loc_b: Location<Self::GPR, Self::SIMD>,
2301        ret: Location<Self::GPR, Self::SIMD>,
2302    ) -> Result<(), CompileError>;
2303    /// Sub 2 F32 values
2304    fn f32_sub(
2305        &mut self,
2306        loc_a: Location<Self::GPR, Self::SIMD>,
2307        loc_b: Location<Self::GPR, Self::SIMD>,
2308        ret: Location<Self::GPR, Self::SIMD>,
2309    ) -> Result<(), CompileError>;
2310    /// Multiply 2 F32 values
2311    fn f32_mul(
2312        &mut self,
2313        loc_a: Location<Self::GPR, Self::SIMD>,
2314        loc_b: Location<Self::GPR, Self::SIMD>,
2315        ret: Location<Self::GPR, Self::SIMD>,
2316    ) -> Result<(), CompileError>;
2317    /// Divide 2 F32 values
2318    fn f32_div(
2319        &mut self,
2320        loc_a: Location<Self::GPR, Self::SIMD>,
2321        loc_b: Location<Self::GPR, Self::SIMD>,
2322        ret: Location<Self::GPR, Self::SIMD>,
2323    ) -> Result<(), CompileError>;
2324
2325    /// Standard function Trampoline generation
2326    fn gen_std_trampoline(
2327        &self,
2328        sig: &FunctionType,
2329        calling_convention: CallingConvention,
2330    ) -> Result<FunctionBody, CompileError>;
2331    /// Generates dynamic import function call trampoline for a function type.
2332    fn gen_std_dynamic_import_trampoline(
2333        &self,
2334        vmoffsets: &VMOffsets,
2335        sig: &FunctionType,
2336        calling_convention: CallingConvention,
2337    ) -> Result<FunctionBody, CompileError>;
2338    /// Singlepass calls import functions through a trampoline.
2339    fn gen_import_call_trampoline(
2340        &self,
2341        vmoffsets: &VMOffsets,
2342        index: FunctionIndex,
2343        sig: &FunctionType,
2344        calling_convention: CallingConvention,
2345    ) -> Result<CustomSection, CompileError>;
    /// generate eh_frame unwind instructions (or None if not possible / supported)
2347    fn gen_dwarf_unwind_info(&mut self, code_len: usize) -> Option<UnwindInstructions>;
2348    /// generate Windows unwind instructions (or None if not possible / supported)
2349    fn gen_windows_unwind_info(&mut self, code_len: usize) -> Option<Vec<u8>>;
2350}
2351
2352/// Standard entry trampoline generation
2353pub fn gen_std_trampoline(
2354    sig: &FunctionType,
2355    target: &Target,
2356    calling_convention: CallingConvention,
2357) -> Result<FunctionBody, CompileError> {
2358    match target.triple().architecture {
2359        Architecture::X86_64 => {
2360            let machine = MachineX86_64::new(Some(target.clone()))?;
2361            machine.gen_std_trampoline(sig, calling_convention)
2362        }
2363        Architecture::Aarch64(_) => {
2364            let machine = MachineARM64::new(Some(target.clone()));
2365            machine.gen_std_trampoline(sig, calling_convention)
2366        }
2367        _ => Err(CompileError::UnsupportedTarget(
2368            "singlepass unimplemented arch for gen_std_trampoline".to_owned(),
2369        )),
2370    }
2371}
2372
2373/// Generates dynamic import function call trampoline for a function type.
2374pub fn gen_std_dynamic_import_trampoline(
2375    vmoffsets: &VMOffsets,
2376    sig: &FunctionType,
2377    target: &Target,
2378    calling_convention: CallingConvention,
2379) -> Result<FunctionBody, CompileError> {
2380    match target.triple().architecture {
2381        Architecture::X86_64 => {
2382            let machine = MachineX86_64::new(Some(target.clone()))?;
2383            machine.gen_std_dynamic_import_trampoline(vmoffsets, sig, calling_convention)
2384        }
2385        Architecture::Aarch64(_) => {
2386            let machine = MachineARM64::new(Some(target.clone()));
2387            machine.gen_std_dynamic_import_trampoline(vmoffsets, sig, calling_convention)
2388        }
2389        _ => Err(CompileError::UnsupportedTarget(
2390            "singlepass unimplemented arch for gen_std_dynamic_import_trampoline".to_owned(),
2391        )),
2392    }
2393}
2394/// Singlepass calls import functions through a trampoline.
2395pub fn gen_import_call_trampoline(
2396    vmoffsets: &VMOffsets,
2397    index: FunctionIndex,
2398    sig: &FunctionType,
2399    target: &Target,
2400    calling_convention: CallingConvention,
2401) -> Result<CustomSection, CompileError> {
2402    match target.triple().architecture {
2403        Architecture::X86_64 => {
2404            let machine = MachineX86_64::new(Some(target.clone()))?;
2405            machine.gen_import_call_trampoline(vmoffsets, index, sig, calling_convention)
2406        }
2407        Architecture::Aarch64(_) => {
2408            let machine = MachineARM64::new(Some(target.clone()));
2409            machine.gen_import_call_trampoline(vmoffsets, index, sig, calling_convention)
2410        }
2411        _ => Err(CompileError::UnsupportedTarget(
2412            "singlepass unimplemented arch for gen_import_call_trampoline".to_owned(),
2413        )),
2414    }
2415}
2416
// Constants for the bounds of truncation operations. These are the least or
// greatest exact floats in either f32 or f64 representation less-than (for
// least) or greater-than (for greatest) the i32 or i64 or u32 or u64
// min (for least) or max (for greatest), when rounding towards zero.
//
// They are used by the float→int truncation lowering to detect out-of-range
// inputs (trap or saturate). Each literal is bit-exactly representable in its
// float type; do not alter them, as the comparison semantics depend on the
// exact value.

/// Greatest Exact Float (32 bits) less-than i32::MIN when rounding towards zero.
pub const GEF32_LT_I32_MIN: f32 = -2147483904.0;
/// Least Exact Float (32 bits) greater-than i32::MAX when rounding towards zero.
pub const LEF32_GT_I32_MAX: f32 = 2147483648.0;
/// Greatest Exact Float (32 bits) less-than i64::MIN when rounding towards zero.
pub const GEF32_LT_I64_MIN: f32 = -9223373136366403584.0;
/// Least Exact Float (32 bits) greater-than i64::MAX when rounding towards zero.
pub const LEF32_GT_I64_MAX: f32 = 9223372036854775808.0;
/// Greatest Exact Float (32 bits) less-than u32::MIN when rounding towards zero.
pub const GEF32_LT_U32_MIN: f32 = -1.0;
/// Least Exact Float (32 bits) greater-than u32::MAX when rounding towards zero.
pub const LEF32_GT_U32_MAX: f32 = 4294967296.0;
/// Greatest Exact Float (32 bits) less-than u64::MIN when rounding towards zero.
pub const GEF32_LT_U64_MIN: f32 = -1.0;
/// Least Exact Float (32 bits) greater-than u64::MAX when rounding towards zero.
pub const LEF32_GT_U64_MAX: f32 = 18446744073709551616.0;

/// Greatest Exact Float (64 bits) less-than i32::MIN when rounding towards zero.
pub const GEF64_LT_I32_MIN: f64 = -2147483649.0;
/// Least Exact Float (64 bits) greater-than i32::MAX when rounding towards zero.
pub const LEF64_GT_I32_MAX: f64 = 2147483648.0;
/// Greatest Exact Float (64 bits) less-than i64::MIN when rounding towards zero.
pub const GEF64_LT_I64_MIN: f64 = -9223372036854777856.0;
/// Least Exact Float (64 bits) greater-than i64::MAX when rounding towards zero.
pub const LEF64_GT_I64_MAX: f64 = 9223372036854775808.0;
/// Greatest Exact Float (64 bits) less-than u32::MIN when rounding towards zero.
pub const GEF64_LT_U32_MIN: f64 = -1.0;
/// Least Exact Float (64 bits) greater-than u32::MAX when rounding towards zero.
pub const LEF64_GT_U32_MAX: f64 = 4294967296.0;
/// Greatest Exact Float (64 bits) less-than u64::MIN when rounding towards zero.
pub const GEF64_LT_U64_MIN: f64 = -1.0;
/// Least Exact Float (64 bits) greater-than u64::MAX when rounding towards zero.
pub const LEF64_GT_U64_MAX: f64 = 18446744073709551616.0;