// wasmer_compiler_singlepass/machine.rs

1use crate::{
2    common_decl::*,
3    location::{Location, Reg},
4    machine_arm64::MachineARM64,
5    machine_riscv::MachineRiscv,
6    machine_x64::MachineX86_64,
7    unwind::UnwindInstructions,
8};
9
10use dynasmrt::{AssemblyOffset, DynamicLabel};
11use std::{
12    collections::{BTreeMap, HashMap},
13    fmt::Debug,
14};
15use wasmer_compiler::{
16    types::{
17        address_map::InstructionAddressMap,
18        function::FunctionBody,
19        relocation::{Relocation, RelocationTarget},
20        section::CustomSection,
21    },
22    wasmparser::MemArg,
23};
24use wasmer_types::{
25    CompileError, FunctionIndex, FunctionType, TrapCode, TrapInformation, VMOffsets,
26    target::{Architecture, CallingConvention, Target},
27};
/// A label in emitted machine code, usable as a branch/call target.
pub type Label = DynamicLabel;
/// A byte offset into the emitted machine-code buffer.
pub type Offset = AssemblyOffset;
30
/// A compile-time-known immediate value tracked by the single-pass compiler.
///
/// All payloads are small `Copy` scalars, so the enum itself is `Copy`;
/// `Debug` is derived so values can appear in diagnostics.
#[allow(dead_code)]
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum Value {
    /// 8-bit integer immediate.
    I8(i8),
    /// 32-bit integer immediate.
    I32(i32),
    /// 64-bit integer immediate.
    I64(i64),
    /// 32-bit float immediate.
    F32(f32),
    /// 64-bit float immediate.
    F64(f64),
}
40
/// Return early from the enclosing function with a
/// `CompileError::Codegen` built from `format!`-style arguments.
///
/// NOTE(review): the expansion names `CompileError` unqualified, so every
/// invocation site must have `wasmer_types::CompileError` in scope, and the
/// enclosing function must return `Result<_, CompileError>`.
#[macro_export]
macro_rules! codegen_error {
    ($($arg:tt)*) => {return Err(CompileError::Codegen(format!($($arg)*)))}
}
45
/// Abstraction over operands that may carry a compile-time-known immediate.
#[allow(unused)]
pub trait MaybeImmediate {
    /// The immediate value, if this operand is a constant.
    fn imm_value(&self) -> Option<Value>;
    /// Whether this operand holds a constant immediate.
    fn is_imm(&self) -> bool {
        self.imm_value().is_some()
    }
    /// The immediate widened to a raw `i64` scalar, if available.
    fn imm_value_scalar(&self) -> Option<i64>;
}
54
/// A trap table for a `RunnableModuleInfo`.
#[derive(Clone, Debug, Default)]
pub struct TrapTable {
    /// Mappings from offsets in generated machine code to the corresponding trap code.
    pub offset_to_code: BTreeMap<usize, TrapCode>,
}
61
/// Native page size in bytes. All supported machines seem to use pages of
/// this size, so this is not made per-architecture for now.
pub const NATIVE_PAGE_SIZE: usize = 4096;
64
/// Comparison conditions for unsigned conditional jumps
/// (consumed by `Machine::jmp_on_condition`).
///
/// Fieldless public enum: `Debug`/`Clone`/`Copy`/`PartialEq`/`Eq` are free
/// to derive and make the type usable in diagnostics and comparisons.
#[allow(dead_code)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum UnsignedCondition {
    /// Operands compare equal.
    Equal,
    /// Operands compare not equal.
    NotEqual,
    /// First operand is strictly greater (unsigned).
    Above,
    /// First operand is greater or equal (unsigned).
    AboveEqual,
    /// First operand is strictly less (unsigned).
    Below,
    /// First operand is less or equal (unsigned).
    BelowEqual,
}
74
/// Categories used to annotate regions of emitted assembly.
///
/// Fieldless enum, so `Copy`/`PartialEq`/`Eq` are added to the existing
/// `Debug`/`Clone` derives at zero cost (backward compatible).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum AssemblyComment {
    /// Native function prologue.
    FunctionPrologue,
    /// Initialization of the function's locals.
    InitializeLocals,
    /// The trap-handlers table.
    TrapHandlersTable,
    /// Stack red zone.
    RedZone,
    /// The main body of the compiled function.
    FunctionBody,
}

impl std::fmt::Display for AssemblyComment {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // These exact strings are emitted at runtime; keep them stable.
        let text = match self {
            AssemblyComment::FunctionPrologue => "function prologue",
            AssemblyComment::InitializeLocals => "initialize locals",
            AssemblyComment::TrapHandlersTable => "trap handlers table",
            AssemblyComment::RedZone => "red zone",
            AssemblyComment::FunctionBody => "body",
        };
        f.write_str(text)
    }
}
95
/// Result of finalizing the assembler: the emitted machine code together
/// with annotations describing regions of it.
pub(crate) struct FinalizedAssembly {
    /// The emitted machine-code bytes.
    pub(crate) body: Vec<u8>,
    // NOTE(review): keys are presumably byte offsets into `body` — confirm
    // against the producer (`Machine::assembler_finalize`).
    pub(crate) assembly_comments: HashMap<usize, AssemblyComment>,
}
100
101#[allow(unused)]
102pub trait Machine {
103    type GPR: Copy + Eq + Debug + Reg;
104    type SIMD: Copy + Eq + Debug + Reg;
105    /// Get current assembler offset
106    fn assembler_get_offset(&self) -> Offset;
107    /// Get the GPR that hold vmctx
108    fn get_vmctx_reg(&self) -> Self::GPR;
109    /// Picks an unused general purpose register for local/stack/argument use.
110    ///
111    /// This method does not mark the register as used
112    fn pick_gpr(&self) -> Option<Self::GPR>;
113    /// Picks an unused general purpose register for internal temporary use.
114    ///
115    /// This method does not mark the register as used
116    fn pick_temp_gpr(&self) -> Option<Self::GPR>;
117    /// Get all used GPR
118    fn get_used_gprs(&self) -> Vec<Self::GPR>;
119    /// Get all used SIMD regs
120    fn get_used_simd(&self) -> Vec<Self::SIMD>;
    /// Picks an unused general purpose register and marks it as used
122    fn acquire_temp_gpr(&mut self) -> Option<Self::GPR>;
123    /// Releases a temporary GPR.
124    fn release_gpr(&mut self, gpr: Self::GPR);
125    /// Specify that a given register is in use.
126    fn reserve_unused_temp_gpr(&mut self, gpr: Self::GPR) -> Self::GPR;
127    /// reserve a GPR
128    fn reserve_gpr(&mut self, gpr: Self::GPR);
129    /// Push used gpr to the stack. Return the bytes taken on the stack.
130    fn push_used_gpr(&mut self, gprs: &[Self::GPR]) -> Result<usize, CompileError>;
131    /// Pop used gpr from the stack.
132    fn pop_used_gpr(&mut self, gprs: &[Self::GPR]) -> Result<(), CompileError>;
133    /// Picks an unused SIMD register.
134    ///
135    /// This method does not mark the register as used
136    fn pick_simd(&self) -> Option<Self::SIMD>;
137    /// Picks an unused SIMD register for internal temporary use.
138    ///
139    /// This method does not mark the register as used
140    fn pick_temp_simd(&self) -> Option<Self::SIMD>;
141    /// Acquires a temporary XMM register.
142    fn acquire_temp_simd(&mut self) -> Option<Self::SIMD>;
143    /// reserve a SIMD register
144    fn reserve_simd(&mut self, simd: Self::SIMD);
145    /// Releases a temporary XMM register.
146    fn release_simd(&mut self, simd: Self::SIMD);
147    /// Push used simd regs to the stack. Return bytes taken on the stack
148    fn push_used_simd(&mut self, simds: &[Self::SIMD]) -> Result<usize, CompileError>;
149    /// Pop used simd regs to the stack
150    fn pop_used_simd(&mut self, simds: &[Self::SIMD]) -> Result<(), CompileError>;
    /// Return a rounded stack adjustment value (must be a multiple of 16 bytes on ARM64, for example)
152    fn round_stack_adjust(&self, value: usize) -> usize;
153    /// Set the source location of the Wasm to the given offset.
154    fn set_srcloc(&mut self, offset: u32);
155    /// Marks each address in the code range emitted by `f` with the trap code `code`.
156    fn mark_address_range_with_trap_code(&mut self, code: TrapCode, begin: usize, end: usize);
157    /// Marks one address as trappable with trap code `code`.
158    fn mark_address_with_trap_code(&mut self, code: TrapCode);
159    /// Marks the instruction as trappable with trap code `code`. return "begin" offset
160    fn mark_instruction_with_trap_code(&mut self, code: TrapCode) -> usize;
161    /// Pushes the instruction to the address map, calculating the offset from a
162    /// provided beginning address.
163    fn mark_instruction_address_end(&mut self, begin: usize);
164    /// Insert a StackOverflow (at offset 0)
165    fn insert_stackoverflow(&mut self);
166    /// Get all current TrapInformation
167    fn collect_trap_information(&self) -> Vec<TrapInformation>;
    /// Get all instruction address mappings
169    fn instructions_address_map(&self) -> Vec<InstructionAddressMap>;
170    /// Memory location for a local on the stack
171    /// Like Location::Memory(GPR::RBP, -(self.stack_offset.0 as i32)) for x86_64
172    fn local_on_stack(&mut self, stack_offset: i32) -> Location<Self::GPR, Self::SIMD>;
173    /// Allocate an extra space on the stack.
174    fn extend_stack(&mut self, delta_stack_offset: u32) -> Result<(), CompileError>;
175    /// Truncate stack space by the `delta_stack_offset`.
176    fn truncate_stack(&mut self, delta_stack_offset: u32) -> Result<(), CompileError>;
    /// Zero out a location of the given size
178    fn zero_location(
179        &mut self,
180        size: Size,
181        location: Location<Self::GPR, Self::SIMD>,
182    ) -> Result<(), CompileError>;
183    /// GPR Reg used for local pointer on the stack
184    fn local_pointer(&self) -> Self::GPR;
185    /// push a value on the stack for a native call
186    fn move_location_for_native(
187        &mut self,
188        size: Size,
189        loc: Location<Self::GPR, Self::SIMD>,
190        dest: Location<Self::GPR, Self::SIMD>,
191    ) -> Result<(), CompileError>;
192    /// Determine whether a local should be allocated on the stack.
193    fn is_local_on_stack(&self, idx: usize) -> bool;
194    /// Determine a local's location.
195    fn get_local_location(
196        &self,
197        idx: usize,
198        callee_saved_regs_size: usize,
199    ) -> Location<Self::GPR, Self::SIMD>;
200    /// Move a local to the stack
201    /// Like emit_mov(Size::S64, location, Location::Memory(GPR::RBP, -(self.stack_offset.0 as i32)));
202    fn move_local(
203        &mut self,
204        stack_offset: i32,
205        location: Location<Self::GPR, Self::SIMD>,
206    ) -> Result<(), CompileError>;
207    /// List of register to save, depending on the CallingConvention
208    fn list_to_save(
209        &self,
210        calling_convention: CallingConvention,
211    ) -> Vec<Location<Self::GPR, Self::SIMD>>;
212    /// Get registers for first N function call parameters.
213    fn get_param_registers(&self, calling_convention: CallingConvention) -> &'static [Self::GPR];
214    /// Get param location (to build a call, using SP for stack args)
215    fn get_param_location(
216        &self,
217        idx: usize,
218        sz: Size,
219        stack_offset: &mut usize,
220        calling_convention: CallingConvention,
221    ) -> Location<Self::GPR, Self::SIMD>;
222    /// Get call param location (from a call, using FP for stack args)
223    fn get_call_param_location(
224        &self,
225        result_slots: usize,
226        idx: usize,
227        sz: Size,
228        stack_offset: &mut usize,
229        calling_convention: CallingConvention,
230    ) -> Location<Self::GPR, Self::SIMD>;
231    /// Get param location (idx must point to an argument that is passed in a GPR).
232    fn get_simple_param_location(
233        &self,
234        idx: usize,
235        calling_convention: CallingConvention,
236    ) -> Self::GPR;
237    /// Adjust GPR param for calling convention ABI purpose.
238    fn adjust_gpr_param_location(
239        &mut self,
240        register: Self::GPR,
241        size: Size,
242    ) -> Result<(), CompileError>;
243    /// Get return value location (to build a call, using SP for stack return values).
244    fn get_return_value_location(
245        &self,
246        idx: usize,
247        stack_location: &mut usize,
248        calling_convention: CallingConvention,
249    ) -> Location<Self::GPR, Self::SIMD>;
250    /// Get return value location (from a call, using FP for stack return values).
251    fn get_call_return_value_location(
252        &self,
253        idx: usize,
254        calling_convention: CallingConvention,
255    ) -> Location<Self::GPR, Self::SIMD>;
256    /// move a location to another
257    fn move_location(
258        &mut self,
259        size: Size,
260        source: Location<Self::GPR, Self::SIMD>,
261        dest: Location<Self::GPR, Self::SIMD>,
262    ) -> Result<(), CompileError>;
263    /// move a location to another, with zero or sign extension
264    fn move_location_extend(
265        &mut self,
266        size_val: Size,
267        signed: bool,
268        source: Location<Self::GPR, Self::SIMD>,
269        size_op: Size,
270        dest: Location<Self::GPR, Self::SIMD>,
271    ) -> Result<(), CompileError>;
272    /// Init the stack loc counter
273    fn init_stack_loc(
274        &mut self,
275        init_stack_loc_cnt: u64,
276        last_stack_loc: Location<Self::GPR, Self::SIMD>,
277    ) -> Result<(), CompileError>;
278    /// Restore save_area
279    fn restore_saved_area(&mut self, saved_area_offset: i32) -> Result<(), CompileError>;
280    /// Pop a location
281    fn pop_location(
282        &mut self,
283        location: Location<Self::GPR, Self::SIMD>,
284    ) -> Result<(), CompileError>;
285
286    /// Finalize the assembler
287    fn assembler_finalize(
288        self,
289        assembly_comments: HashMap<usize, AssemblyComment>,
290    ) -> Result<FinalizedAssembly, CompileError>;
291
292    /// get_offset of Assembler
293    fn get_offset(&self) -> Offset;
294
295    /// finalize a function
296    fn finalize_function(&mut self) -> Result<(), CompileError>;
297
298    /// emit native function prolog (depending on the calling Convention, like "PUSH RBP / MOV RSP, RBP")
299    fn emit_function_prolog(&mut self) -> Result<(), CompileError>;
300    /// emit native function epilog (depending on the calling Convention, like "MOV RBP, RSP / POP RBP")
301    fn emit_function_epilog(&mut self) -> Result<(), CompileError>;
302    /// Handle copy to SIMD register from ret value (if needed by the arch/calling convention)
303    fn emit_function_return_float(&mut self) -> Result<(), CompileError>;
    /// Canonicalize a NaN (or panic if not supported)
305    fn canonicalize_nan(
306        &mut self,
307        sz: Size,
308        input: Location<Self::GPR, Self::SIMD>,
309        output: Location<Self::GPR, Self::SIMD>,
310    ) -> Result<(), CompileError>;
311
312    /// emit an Illegal Opcode, associated with a trapcode
313    fn emit_illegal_op(&mut self, trp: TrapCode) -> Result<(), CompileError>;
314    /// create a new label
315    fn get_label(&mut self) -> Label;
316    /// emit a label
317    fn emit_label(&mut self, label: Label) -> Result<(), CompileError>;
318
319    /// get the gpr used for call. like RAX on x86_64
320    fn get_gpr_for_call(&self) -> Self::GPR;
321    /// Emit a call using the value in register
322    fn emit_call_register(&mut self, register: Self::GPR) -> Result<(), CompileError>;
323    /// Emit a call to a label
324    fn emit_call_label(&mut self, label: Label) -> Result<(), CompileError>;
325    /// indirect call with trampoline
326    fn arch_emit_indirect_call_with_trampoline(
327        &mut self,
328        location: Location<Self::GPR, Self::SIMD>,
329    ) -> Result<(), CompileError>;
330    /// emit a call to a location
331    fn emit_call_location(
332        &mut self,
333        location: Location<Self::GPR, Self::SIMD>,
334    ) -> Result<(), CompileError>;
335
336    /// Emit a debug breakpoint
337    fn emit_debug_breakpoint(&mut self) -> Result<(), CompileError>;
338
339    /// Add src+dst -> dst (with or without flags)
340    fn location_add(
341        &mut self,
342        size: Size,
343        source: Location<Self::GPR, Self::SIMD>,
344        dest: Location<Self::GPR, Self::SIMD>,
345        flags: bool,
346    ) -> Result<(), CompileError>;
347
348    /// Cmp src - dst and set flags
349    fn location_cmp(
350        &mut self,
351        size: Size,
352        source: Location<Self::GPR, Self::SIMD>,
353        dest: Location<Self::GPR, Self::SIMD>,
354    ) -> Result<(), CompileError>;
355
    /// jmp without condition
357    fn jmp_unconditional(&mut self, label: Label) -> Result<(), CompileError>;
358
359    /// jmp to label if the provided condition is true (when comparing loc_a and loc_b)
360    fn jmp_on_condition(
361        &mut self,
362        cond: UnsignedCondition,
363        size: Size,
364        loc_a: Location<Self::GPR, Self::SIMD>,
365        loc_b: Location<Self::GPR, Self::SIMD>,
366        label: Label,
367    ) -> Result<(), CompileError>;
368
    /// jmp using a jump table at `label` with `cond` as the index
370    fn emit_jmp_to_jumptable(
371        &mut self,
372        label: Label,
373        cond: Location<Self::GPR, Self::SIMD>,
374    ) -> Result<(), CompileError>;
375
376    /// Align for Loop (may do nothing, depending on the arch)
377    fn align_for_loop(&mut self) -> Result<(), CompileError>;
378
379    /// ret (from a Call)
380    fn emit_ret(&mut self) -> Result<(), CompileError>;
381
382    /// Stack push of a location
383    fn emit_push(
384        &mut self,
385        size: Size,
386        loc: Location<Self::GPR, Self::SIMD>,
387    ) -> Result<(), CompileError>;
388    /// Stack pop of a location
389    fn emit_pop(
390        &mut self,
391        size: Size,
392        loc: Location<Self::GPR, Self::SIMD>,
393    ) -> Result<(), CompileError>;
394    /// relaxed mov: move from anywhere to anywhere
395    fn emit_relaxed_mov(
396        &mut self,
397        sz: Size,
398        src: Location<Self::GPR, Self::SIMD>,
399        dst: Location<Self::GPR, Self::SIMD>,
400    ) -> Result<(), CompileError>;
401    /// relaxed cmp: compare from anywhere and anywhere
402    fn emit_relaxed_cmp(
403        &mut self,
404        sz: Size,
405        src: Location<Self::GPR, Self::SIMD>,
406        dst: Location<Self::GPR, Self::SIMD>,
407    ) -> Result<(), CompileError>;
408    /// Emit a memory fence. Can be nothing for x86_64 or a DMB on ARM64 for example
409    fn emit_memory_fence(&mut self) -> Result<(), CompileError>;
410    /// relaxed move with sign extension
411    fn emit_relaxed_sign_extension(
412        &mut self,
413        sz_src: Size,
414        src: Location<Self::GPR, Self::SIMD>,
415        sz_dst: Size,
416        dst: Location<Self::GPR, Self::SIMD>,
417    ) -> Result<(), CompileError>;
418    /// Multiply location with immediate
419    fn emit_imul_imm32(
420        &mut self,
421        size: Size,
422        imm32: u32,
423        gpr: Self::GPR,
424    ) -> Result<(), CompileError>;
425    /// Add with location directly from the stack
426    fn emit_binop_add32(
427        &mut self,
428        loc_a: Location<Self::GPR, Self::SIMD>,
429        loc_b: Location<Self::GPR, Self::SIMD>,
430        ret: Location<Self::GPR, Self::SIMD>,
431    ) -> Result<(), CompileError>;
432    /// Sub with location directly from the stack
433    fn emit_binop_sub32(
434        &mut self,
435        loc_a: Location<Self::GPR, Self::SIMD>,
436        loc_b: Location<Self::GPR, Self::SIMD>,
437        ret: Location<Self::GPR, Self::SIMD>,
438    ) -> Result<(), CompileError>;
439    /// Multiply with location directly from the stack
440    fn emit_binop_mul32(
441        &mut self,
442        loc_a: Location<Self::GPR, Self::SIMD>,
443        loc_b: Location<Self::GPR, Self::SIMD>,
444        ret: Location<Self::GPR, Self::SIMD>,
445    ) -> Result<(), CompileError>;
446    /// Unsigned Division with location directly from the stack. return the offset of the DIV opcode, to mark as trappable.
447    fn emit_binop_udiv32(
448        &mut self,
449        loc_a: Location<Self::GPR, Self::SIMD>,
450        loc_b: Location<Self::GPR, Self::SIMD>,
451        ret: Location<Self::GPR, Self::SIMD>,
452        integer_division_by_zero: Label,
453    ) -> Result<usize, CompileError>;
454    /// Signed Division with location directly from the stack. return the offset of the DIV opcode, to mark as trappable.
455    fn emit_binop_sdiv32(
456        &mut self,
457        loc_a: Location<Self::GPR, Self::SIMD>,
458        loc_b: Location<Self::GPR, Self::SIMD>,
459        ret: Location<Self::GPR, Self::SIMD>,
460        integer_division_by_zero: Label,
461        integer_overflow: Label,
462    ) -> Result<usize, CompileError>;
    /// Unsigned Remainder (of a division) with location directly from the stack. return the offset of the DIV opcode, to mark as trappable.
464    fn emit_binop_urem32(
465        &mut self,
466        loc_a: Location<Self::GPR, Self::SIMD>,
467        loc_b: Location<Self::GPR, Self::SIMD>,
468        ret: Location<Self::GPR, Self::SIMD>,
469        integer_division_by_zero: Label,
470    ) -> Result<usize, CompileError>;
    /// Signed Remainder (of a division) with location directly from the stack. return the offset of the DIV opcode, to mark as trappable.
472    fn emit_binop_srem32(
473        &mut self,
474        loc_a: Location<Self::GPR, Self::SIMD>,
475        loc_b: Location<Self::GPR, Self::SIMD>,
476        ret: Location<Self::GPR, Self::SIMD>,
477        integer_division_by_zero: Label,
478    ) -> Result<usize, CompileError>;
479    /// And with location directly from the stack
480    fn emit_binop_and32(
481        &mut self,
482        loc_a: Location<Self::GPR, Self::SIMD>,
483        loc_b: Location<Self::GPR, Self::SIMD>,
484        ret: Location<Self::GPR, Self::SIMD>,
485    ) -> Result<(), CompileError>;
486    /// Or with location directly from the stack
487    fn emit_binop_or32(
488        &mut self,
489        loc_a: Location<Self::GPR, Self::SIMD>,
490        loc_b: Location<Self::GPR, Self::SIMD>,
491        ret: Location<Self::GPR, Self::SIMD>,
492    ) -> Result<(), CompileError>;
493    /// Xor with location directly from the stack
494    fn emit_binop_xor32(
495        &mut self,
496        loc_a: Location<Self::GPR, Self::SIMD>,
497        loc_b: Location<Self::GPR, Self::SIMD>,
498        ret: Location<Self::GPR, Self::SIMD>,
499    ) -> Result<(), CompileError>;
500    /// Signed Greater of Equal Compare 2 i32, result in a GPR
501    fn i32_cmp_ge_s(
502        &mut self,
503        loc_a: Location<Self::GPR, Self::SIMD>,
504        loc_b: Location<Self::GPR, Self::SIMD>,
505        ret: Location<Self::GPR, Self::SIMD>,
506    ) -> Result<(), CompileError>;
507    /// Signed Greater Than Compare 2 i32, result in a GPR
508    fn i32_cmp_gt_s(
509        &mut self,
510        loc_a: Location<Self::GPR, Self::SIMD>,
511        loc_b: Location<Self::GPR, Self::SIMD>,
512        ret: Location<Self::GPR, Self::SIMD>,
513    ) -> Result<(), CompileError>;
514    /// Signed Less of Equal Compare 2 i32, result in a GPR
515    fn i32_cmp_le_s(
516        &mut self,
517        loc_a: Location<Self::GPR, Self::SIMD>,
518        loc_b: Location<Self::GPR, Self::SIMD>,
519        ret: Location<Self::GPR, Self::SIMD>,
520    ) -> Result<(), CompileError>;
521    /// Signed Less Than Compare 2 i32, result in a GPR
522    fn i32_cmp_lt_s(
523        &mut self,
524        loc_a: Location<Self::GPR, Self::SIMD>,
525        loc_b: Location<Self::GPR, Self::SIMD>,
526        ret: Location<Self::GPR, Self::SIMD>,
527    ) -> Result<(), CompileError>;
528    /// Unsigned Greater of Equal Compare 2 i32, result in a GPR
529    fn i32_cmp_ge_u(
530        &mut self,
531        loc_a: Location<Self::GPR, Self::SIMD>,
532        loc_b: Location<Self::GPR, Self::SIMD>,
533        ret: Location<Self::GPR, Self::SIMD>,
534    ) -> Result<(), CompileError>;
535    /// Unsigned Greater Than Compare 2 i32, result in a GPR
536    fn i32_cmp_gt_u(
537        &mut self,
538        loc_a: Location<Self::GPR, Self::SIMD>,
539        loc_b: Location<Self::GPR, Self::SIMD>,
540        ret: Location<Self::GPR, Self::SIMD>,
541    ) -> Result<(), CompileError>;
542    /// Unsigned Less of Equal Compare 2 i32, result in a GPR
543    fn i32_cmp_le_u(
544        &mut self,
545        loc_a: Location<Self::GPR, Self::SIMD>,
546        loc_b: Location<Self::GPR, Self::SIMD>,
547        ret: Location<Self::GPR, Self::SIMD>,
548    ) -> Result<(), CompileError>;
549    /// Unsigned Less Than Compare 2 i32, result in a GPR
550    fn i32_cmp_lt_u(
551        &mut self,
552        loc_a: Location<Self::GPR, Self::SIMD>,
553        loc_b: Location<Self::GPR, Self::SIMD>,
554        ret: Location<Self::GPR, Self::SIMD>,
555    ) -> Result<(), CompileError>;
556    /// Not Equal Compare 2 i32, result in a GPR
557    fn i32_cmp_ne(
558        &mut self,
559        loc_a: Location<Self::GPR, Self::SIMD>,
560        loc_b: Location<Self::GPR, Self::SIMD>,
561        ret: Location<Self::GPR, Self::SIMD>,
562    ) -> Result<(), CompileError>;
563    /// Equal Compare 2 i32, result in a GPR
564    fn i32_cmp_eq(
565        &mut self,
566        loc_a: Location<Self::GPR, Self::SIMD>,
567        loc_b: Location<Self::GPR, Self::SIMD>,
568        ret: Location<Self::GPR, Self::SIMD>,
569    ) -> Result<(), CompileError>;
570    /// Count Leading 0 bit of an i32
571    fn i32_clz(
572        &mut self,
573        loc: Location<Self::GPR, Self::SIMD>,
574        ret: Location<Self::GPR, Self::SIMD>,
575    ) -> Result<(), CompileError>;
    /// Count Trailing 0 bits of an i32
577    fn i32_ctz(
578        &mut self,
579        loc: Location<Self::GPR, Self::SIMD>,
580        ret: Location<Self::GPR, Self::SIMD>,
581    ) -> Result<(), CompileError>;
582    /// Count the number of 1 bit of an i32
583    fn i32_popcnt(
584        &mut self,
585        loc: Location<Self::GPR, Self::SIMD>,
586        ret: Location<Self::GPR, Self::SIMD>,
587    ) -> Result<(), CompileError>;
588    /// i32 Logical Shift Left
589    fn i32_shl(
590        &mut self,
591        loc_a: Location<Self::GPR, Self::SIMD>,
592        loc_b: Location<Self::GPR, Self::SIMD>,
593        ret: Location<Self::GPR, Self::SIMD>,
594    ) -> Result<(), CompileError>;
595    /// i32 Logical Shift Right
596    fn i32_shr(
597        &mut self,
598        loc_a: Location<Self::GPR, Self::SIMD>,
599        loc_b: Location<Self::GPR, Self::SIMD>,
600        ret: Location<Self::GPR, Self::SIMD>,
601    ) -> Result<(), CompileError>;
602    /// i32 Arithmetic Shift Right
603    fn i32_sar(
604        &mut self,
605        loc_a: Location<Self::GPR, Self::SIMD>,
606        loc_b: Location<Self::GPR, Self::SIMD>,
607        ret: Location<Self::GPR, Self::SIMD>,
608    ) -> Result<(), CompileError>;
609    /// i32 Roll Left
610    fn i32_rol(
611        &mut self,
612        loc_a: Location<Self::GPR, Self::SIMD>,
613        loc_b: Location<Self::GPR, Self::SIMD>,
614        ret: Location<Self::GPR, Self::SIMD>,
615    ) -> Result<(), CompileError>;
616    /// i32 Roll Right
617    fn i32_ror(
618        &mut self,
619        loc_a: Location<Self::GPR, Self::SIMD>,
620        loc_b: Location<Self::GPR, Self::SIMD>,
621        ret: Location<Self::GPR, Self::SIMD>,
622    ) -> Result<(), CompileError>;
623    /// i32 load
624    #[allow(clippy::too_many_arguments)]
625    fn i32_load(
626        &mut self,
627        addr: Location<Self::GPR, Self::SIMD>,
628        memarg: &MemArg,
629        ret: Location<Self::GPR, Self::SIMD>,
630        need_check: bool,
631        imported_memories: bool,
632        offset: i32,
633        heap_access_oob: Label,
634        unaligned_atomic: Label,
635    ) -> Result<(), CompileError>;
636    /// i32 load of an unsigned 8bits
637    #[allow(clippy::too_many_arguments)]
638    fn i32_load_8u(
639        &mut self,
640        addr: Location<Self::GPR, Self::SIMD>,
641        memarg: &MemArg,
642        ret: Location<Self::GPR, Self::SIMD>,
643        need_check: bool,
644        imported_memories: bool,
645        offset: i32,
646        heap_access_oob: Label,
647        unaligned_atomic: Label,
648    ) -> Result<(), CompileError>;
649    /// i32 load of an signed 8bits
650    #[allow(clippy::too_many_arguments)]
651    fn i32_load_8s(
652        &mut self,
653        addr: Location<Self::GPR, Self::SIMD>,
654        memarg: &MemArg,
655        ret: Location<Self::GPR, Self::SIMD>,
656        need_check: bool,
657        imported_memories: bool,
658        offset: i32,
659        heap_access_oob: Label,
660        unaligned_atomic: Label,
661    ) -> Result<(), CompileError>;
662    /// i32 load of an unsigned 16bits
663    #[allow(clippy::too_many_arguments)]
664    fn i32_load_16u(
665        &mut self,
666        addr: Location<Self::GPR, Self::SIMD>,
667        memarg: &MemArg,
668        ret: Location<Self::GPR, Self::SIMD>,
669        need_check: bool,
670        imported_memories: bool,
671        offset: i32,
672        heap_access_oob: Label,
673        unaligned_atomic: Label,
674    ) -> Result<(), CompileError>;
675    /// i32 load of an signed 16bits
676    #[allow(clippy::too_many_arguments)]
677    fn i32_load_16s(
678        &mut self,
679        addr: Location<Self::GPR, Self::SIMD>,
680        memarg: &MemArg,
681        ret: Location<Self::GPR, Self::SIMD>,
682        need_check: bool,
683        imported_memories: bool,
684        offset: i32,
685        heap_access_oob: Label,
686        unaligned_atomic: Label,
687    ) -> Result<(), CompileError>;
688    /// i32 atomic load
689    #[allow(clippy::too_many_arguments)]
690    fn i32_atomic_load(
691        &mut self,
692        addr: Location<Self::GPR, Self::SIMD>,
693        memarg: &MemArg,
694        ret: Location<Self::GPR, Self::SIMD>,
695        need_check: bool,
696        imported_memories: bool,
697        offset: i32,
698        heap_access_oob: Label,
699        unaligned_atomic: Label,
700    ) -> Result<(), CompileError>;
701    /// i32 atomic load of an unsigned 8bits
702    #[allow(clippy::too_many_arguments)]
703    fn i32_atomic_load_8u(
704        &mut self,
705        addr: Location<Self::GPR, Self::SIMD>,
706        memarg: &MemArg,
707        ret: Location<Self::GPR, Self::SIMD>,
708        need_check: bool,
709        imported_memories: bool,
710        offset: i32,
711        heap_access_oob: Label,
712        unaligned_atomic: Label,
713    ) -> Result<(), CompileError>;
714    /// i32 atomic load of an unsigned 16bits
715    #[allow(clippy::too_many_arguments)]
716    fn i32_atomic_load_16u(
717        &mut self,
718        addr: Location<Self::GPR, Self::SIMD>,
719        memarg: &MemArg,
720        ret: Location<Self::GPR, Self::SIMD>,
721        need_check: bool,
722        imported_memories: bool,
723        offset: i32,
724        heap_access_oob: Label,
725        unaligned_atomic: Label,
726    ) -> Result<(), CompileError>;
727    /// i32 save
728    #[allow(clippy::too_many_arguments)]
729    fn i32_save(
730        &mut self,
731        value: Location<Self::GPR, Self::SIMD>,
732        memarg: &MemArg,
733        addr: Location<Self::GPR, Self::SIMD>,
734        need_check: bool,
735        imported_memories: bool,
736        offset: i32,
737        heap_access_oob: Label,
738        unaligned_atomic: Label,
739    ) -> Result<(), CompileError>;
740    /// i32 save of the lower 8bits
741    #[allow(clippy::too_many_arguments)]
742    fn i32_save_8(
743        &mut self,
744        value: Location<Self::GPR, Self::SIMD>,
745        memarg: &MemArg,
746        addr: Location<Self::GPR, Self::SIMD>,
747        need_check: bool,
748        imported_memories: bool,
749        offset: i32,
750        heap_access_oob: Label,
751        unaligned_atomic: Label,
752    ) -> Result<(), CompileError>;
753    /// i32 save of the lower 16bits
754    #[allow(clippy::too_many_arguments)]
755    fn i32_save_16(
756        &mut self,
757        value: Location<Self::GPR, Self::SIMD>,
758        memarg: &MemArg,
759        addr: Location<Self::GPR, Self::SIMD>,
760        need_check: bool,
761        imported_memories: bool,
762        offset: i32,
763        heap_access_oob: Label,
764        unaligned_atomic: Label,
765    ) -> Result<(), CompileError>;
766    /// i32 atomic save
767    #[allow(clippy::too_many_arguments)]
768    fn i32_atomic_save(
769        &mut self,
770        value: Location<Self::GPR, Self::SIMD>,
771        memarg: &MemArg,
772        addr: Location<Self::GPR, Self::SIMD>,
773        need_check: bool,
774        imported_memories: bool,
775        offset: i32,
776        heap_access_oob: Label,
777        unaligned_atomic: Label,
778    ) -> Result<(), CompileError>;
779    /// i32 atomic save of a the lower 8bits
780    #[allow(clippy::too_many_arguments)]
781    fn i32_atomic_save_8(
782        &mut self,
783        value: Location<Self::GPR, Self::SIMD>,
784        memarg: &MemArg,
785        addr: Location<Self::GPR, Self::SIMD>,
786        need_check: bool,
787        imported_memories: bool,
788        offset: i32,
789        heap_access_oob: Label,
790        unaligned_atomic: Label,
791    ) -> Result<(), CompileError>;
792    /// i32 atomic save of a the lower 16bits
793    #[allow(clippy::too_many_arguments)]
794    fn i32_atomic_save_16(
795        &mut self,
796        value: Location<Self::GPR, Self::SIMD>,
797        memarg: &MemArg,
798        addr: Location<Self::GPR, Self::SIMD>,
799        need_check: bool,
800        imported_memories: bool,
801        offset: i32,
802        heap_access_oob: Label,
803        unaligned_atomic: Label,
804    ) -> Result<(), CompileError>;
    // i32 atomic read-modify-write: Add / Sub (full-width and unsigned 8/16-bit variants).
    /// i32 atomic Add with i32
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_add(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Add with unsigned 8bits
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_add_8u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Add with unsigned 16bits
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_add_16u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Sub with i32
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_sub(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Sub with unsigned 8bits
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_sub_8u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Sub with unsigned 16bits
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_sub_16u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    // i32 atomic read-modify-write: And / Or (full-width and unsigned 8/16-bit variants).
    /// i32 atomic And with i32
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_and(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic And with unsigned 8bits
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_and_8u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic And with unsigned 16bits
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_and_16u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Or with i32
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_or(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Or with unsigned 8bits
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_or_8u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Or with unsigned 16bits
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_or_16u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    // i32 atomic read-modify-write: Xor / Exchange (full-width and unsigned 8/16-bit variants).
    /// i32 atomic Xor with i32
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_xor(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Xor with unsigned 8bits
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_xor_8u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Xor with unsigned 16bits
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_xor_16u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Exchange with i32
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_xchg(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Exchange with u8
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_xchg_8u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Exchange with u16
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_xchg_16u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    // i32 atomic Compare-and-Exchange (full-width and unsigned 8/16-bit variants).
    // `new` is the replacement value, `cmp` the expected value at `target`.
    /// i32 atomic Compare and Exchange with i32
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_cmpxchg(
        &mut self,
        new: Location<Self::GPR, Self::SIMD>,
        cmp: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Compare and Exchange with u8
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_cmpxchg_8u(
        &mut self,
        new: Location<Self::GPR, Self::SIMD>,
        cmp: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Compare and Exchange with u16
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_cmpxchg_16u(
        &mut self,
        new: Location<Self::GPR, Self::SIMD>,
        cmp: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
1102
    /// Emit a move of a function address into a GPR ready for a call, using the
    /// appropriate relocation kind; returns the relocations the linker must apply.
    fn emit_call_with_reloc(
        &mut self,
        calling_convention: CallingConvention,
        reloc_target: RelocationTarget,
    ) -> Result<Vec<Relocation>, CompileError>;
    /// Add with location directly from the stack
    fn emit_binop_add64(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Sub with location directly from the stack
    fn emit_binop_sub64(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Multiply with location directly from the stack
    fn emit_binop_mul64(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Unsigned Division with location directly from the stack. Returns the offset of the DIV opcode, to mark as trappable.
    fn emit_binop_udiv64(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
        integer_division_by_zero: Label,
    ) -> Result<usize, CompileError>;
    /// Signed Division with location directly from the stack. Returns the offset of the DIV opcode, to mark as trappable.
    fn emit_binop_sdiv64(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
        integer_division_by_zero: Label,
        integer_overflow: Label,
    ) -> Result<usize, CompileError>;
    /// Unsigned Remainder (of a division) with location directly from the stack. Returns the offset of the DIV opcode, to mark as trappable.
    fn emit_binop_urem64(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
        integer_division_by_zero: Label,
    ) -> Result<usize, CompileError>;
    /// Signed Remainder (of a division) with location directly from the stack. Returns the offset of the DIV opcode, to mark as trappable.
    fn emit_binop_srem64(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
        integer_division_by_zero: Label,
    ) -> Result<usize, CompileError>;
    /// And with location directly from the stack
    fn emit_binop_and64(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Or with location directly from the stack
    fn emit_binop_or64(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Xor with location directly from the stack
    fn emit_binop_xor64(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Signed Greater or Equal Compare 2 i64, result in a GPR
    fn i64_cmp_ge_s(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Signed Greater Than Compare 2 i64, result in a GPR
    fn i64_cmp_gt_s(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Signed Less or Equal Compare 2 i64, result in a GPR
    fn i64_cmp_le_s(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Signed Less Than Compare 2 i64, result in a GPR
    fn i64_cmp_lt_s(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Unsigned Greater or Equal Compare 2 i64, result in a GPR
    fn i64_cmp_ge_u(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Unsigned Greater Than Compare 2 i64, result in a GPR
    fn i64_cmp_gt_u(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Unsigned Less or Equal Compare 2 i64, result in a GPR
    fn i64_cmp_le_u(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Unsigned Less Than Compare 2 i64, result in a GPR
    fn i64_cmp_lt_u(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Not Equal Compare 2 i64, result in a GPR
    fn i64_cmp_ne(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Equal Compare 2 i64, result in a GPR
    fn i64_cmp_eq(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Count Leading 0 bits of an i64
    fn i64_clz(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Count Trailing 0 bits of an i64
    fn i64_ctz(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Count the number of 1 bits of an i64
    fn i64_popcnt(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// i64 Logical Shift Left
    fn i64_shl(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// i64 Logical Shift Right
    fn i64_shr(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// i64 Arithmetic Shift Right
    fn i64_sar(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// i64 Rotate Left
    fn i64_rol(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// i64 Rotate Right
    fn i64_ror(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// i64 load
    #[allow(clippy::too_many_arguments)]
    fn i64_load(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 load of an unsigned 8bits
    #[allow(clippy::too_many_arguments)]
    fn i64_load_8u(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 load of a signed 8bits
    #[allow(clippy::too_many_arguments)]
    fn i64_load_8s(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 load of an unsigned 32bits
    #[allow(clippy::too_many_arguments)]
    fn i64_load_32u(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 load of a signed 32bits
    #[allow(clippy::too_many_arguments)]
    fn i64_load_32s(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 load of an unsigned 16bits
    #[allow(clippy::too_many_arguments)]
    fn i64_load_16u(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 load of a signed 16bits
    #[allow(clippy::too_many_arguments)]
    fn i64_load_16s(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    // i64 atomic loads (full-width and unsigned 8/16/32-bit variants).
    /// i64 atomic load
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_load(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic load from unsigned 8bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_load_8u(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic load from unsigned 16bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_load_16u(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic load from unsigned 32bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_load_32u(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 save
    #[allow(clippy::too_many_arguments)]
    fn i64_save(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 save of the lower 8bits
    #[allow(clippy::too_many_arguments)]
    fn i64_save_8(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 save of the lower 16bits
    #[allow(clippy::too_many_arguments)]
    fn i64_save_16(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 save of the lower 32bits
    #[allow(clippy::too_many_arguments)]
    fn i64_save_32(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic save
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_save(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic save of the lower 8bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_save_8(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic save of the lower 16bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_save_16(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic save of the lower 32bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_save_32(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    // i64 atomic read-modify-write: Add / Sub / And (full-width and unsigned 8/16/32-bit variants).
    /// i64 atomic Add with i64
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_add(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic Add with unsigned 8bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_add_8u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic Add with unsigned 16bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_add_16u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic Add with unsigned 32bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_add_32u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic Sub with i64
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_sub(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic Sub with unsigned 8bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_sub_8u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic Sub with unsigned 16bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_sub_16u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic Sub with unsigned 32bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_sub_32u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic And with i64
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_and(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic And with unsigned 8bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_and_8u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
1694    /// i64 atomic And with unsigned 16bits
1695    #[allow(clippy::too_many_arguments)]
1696    fn i64_atomic_and_16u(
1697        &mut self,
1698        loc: Location<Self::GPR, Self::SIMD>,
1699        target: Location<Self::GPR, Self::SIMD>,
1700        memarg: &MemArg,
1701        ret: Location<Self::GPR, Self::SIMD>,
1702        need_check: bool,
1703        imported_memories: bool,
1704        offset: i32,
1705        heap_access_oob: Label,
1706        unaligned_atomic: Label,
1707    ) -> Result<(), CompileError>;
1708    /// i64 atomic And with unsigned 32bits
1709    #[allow(clippy::too_many_arguments)]
1710    fn i64_atomic_and_32u(
1711        &mut self,
1712        loc: Location<Self::GPR, Self::SIMD>,
1713        target: Location<Self::GPR, Self::SIMD>,
1714        memarg: &MemArg,
1715        ret: Location<Self::GPR, Self::SIMD>,
1716        need_check: bool,
1717        imported_memories: bool,
1718        offset: i32,
1719        heap_access_oob: Label,
1720        unaligned_atomic: Label,
1721    ) -> Result<(), CompileError>;
1722    /// i64 atomic Or with i64
1723    #[allow(clippy::too_many_arguments)]
1724    fn i64_atomic_or(
1725        &mut self,
1726        loc: Location<Self::GPR, Self::SIMD>,
1727        target: Location<Self::GPR, Self::SIMD>,
1728        memarg: &MemArg,
1729        ret: Location<Self::GPR, Self::SIMD>,
1730        need_check: bool,
1731        imported_memories: bool,
1732        offset: i32,
1733        heap_access_oob: Label,
1734        unaligned_atomic: Label,
1735    ) -> Result<(), CompileError>;
1736    /// i64 atomic Or with unsigned 8bits
1737    #[allow(clippy::too_many_arguments)]
1738    fn i64_atomic_or_8u(
1739        &mut self,
1740        loc: Location<Self::GPR, Self::SIMD>,
1741        target: Location<Self::GPR, Self::SIMD>,
1742        memarg: &MemArg,
1743        ret: Location<Self::GPR, Self::SIMD>,
1744        need_check: bool,
1745        imported_memories: bool,
1746        offset: i32,
1747        heap_access_oob: Label,
1748        unaligned_atomic: Label,
1749    ) -> Result<(), CompileError>;
1750    /// i64 atomic Or with unsigned 16bits
1751    #[allow(clippy::too_many_arguments)]
1752    fn i64_atomic_or_16u(
1753        &mut self,
1754        loc: Location<Self::GPR, Self::SIMD>,
1755        target: Location<Self::GPR, Self::SIMD>,
1756        memarg: &MemArg,
1757        ret: Location<Self::GPR, Self::SIMD>,
1758        need_check: bool,
1759        imported_memories: bool,
1760        offset: i32,
1761        heap_access_oob: Label,
1762        unaligned_atomic: Label,
1763    ) -> Result<(), CompileError>;
1764    /// i64 atomic Or with unsigned 32bits
1765    #[allow(clippy::too_many_arguments)]
1766    fn i64_atomic_or_32u(
1767        &mut self,
1768        loc: Location<Self::GPR, Self::SIMD>,
1769        target: Location<Self::GPR, Self::SIMD>,
1770        memarg: &MemArg,
1771        ret: Location<Self::GPR, Self::SIMD>,
1772        need_check: bool,
1773        imported_memories: bool,
1774        offset: i32,
1775        heap_access_oob: Label,
1776        unaligned_atomic: Label,
1777    ) -> Result<(), CompileError>;
1778    /// i64 atomic Xor with i64
1779    #[allow(clippy::too_many_arguments)]
1780    fn i64_atomic_xor(
1781        &mut self,
1782        loc: Location<Self::GPR, Self::SIMD>,
1783        target: Location<Self::GPR, Self::SIMD>,
1784        memarg: &MemArg,
1785        ret: Location<Self::GPR, Self::SIMD>,
1786        need_check: bool,
1787        imported_memories: bool,
1788        offset: i32,
1789        heap_access_oob: Label,
1790        unaligned_atomic: Label,
1791    ) -> Result<(), CompileError>;
1792    /// i64 atomic Xor with unsigned 8bits
1793    #[allow(clippy::too_many_arguments)]
1794    fn i64_atomic_xor_8u(
1795        &mut self,
1796        loc: Location<Self::GPR, Self::SIMD>,
1797        target: Location<Self::GPR, Self::SIMD>,
1798        memarg: &MemArg,
1799        ret: Location<Self::GPR, Self::SIMD>,
1800        need_check: bool,
1801        imported_memories: bool,
1802        offset: i32,
1803        heap_access_oob: Label,
1804        unaligned_atomic: Label,
1805    ) -> Result<(), CompileError>;
1806    /// i64 atomic Xor with unsigned 16bits
1807    #[allow(clippy::too_many_arguments)]
1808    fn i64_atomic_xor_16u(
1809        &mut self,
1810        loc: Location<Self::GPR, Self::SIMD>,
1811        target: Location<Self::GPR, Self::SIMD>,
1812        memarg: &MemArg,
1813        ret: Location<Self::GPR, Self::SIMD>,
1814        need_check: bool,
1815        imported_memories: bool,
1816        offset: i32,
1817        heap_access_oob: Label,
1818        unaligned_atomic: Label,
1819    ) -> Result<(), CompileError>;
1820    /// i64 atomic Xor with unsigned 32bits
1821    #[allow(clippy::too_many_arguments)]
1822    fn i64_atomic_xor_32u(
1823        &mut self,
1824        loc: Location<Self::GPR, Self::SIMD>,
1825        target: Location<Self::GPR, Self::SIMD>,
1826        memarg: &MemArg,
1827        ret: Location<Self::GPR, Self::SIMD>,
1828        need_check: bool,
1829        imported_memories: bool,
1830        offset: i32,
1831        heap_access_oob: Label,
1832        unaligned_atomic: Label,
1833    ) -> Result<(), CompileError>;
1834    /// i64 atomic Exchange with i64
1835    #[allow(clippy::too_many_arguments)]
1836    fn i64_atomic_xchg(
1837        &mut self,
1838        loc: Location<Self::GPR, Self::SIMD>,
1839        target: Location<Self::GPR, Self::SIMD>,
1840        memarg: &MemArg,
1841        ret: Location<Self::GPR, Self::SIMD>,
1842        need_check: bool,
1843        imported_memories: bool,
1844        offset: i32,
1845        heap_access_oob: Label,
1846        unaligned_atomic: Label,
1847    ) -> Result<(), CompileError>;
1848    /// i64 atomic Exchange with u8
1849    #[allow(clippy::too_many_arguments)]
1850    fn i64_atomic_xchg_8u(
1851        &mut self,
1852        loc: Location<Self::GPR, Self::SIMD>,
1853        target: Location<Self::GPR, Self::SIMD>,
1854        memarg: &MemArg,
1855        ret: Location<Self::GPR, Self::SIMD>,
1856        need_check: bool,
1857        imported_memories: bool,
1858        offset: i32,
1859        heap_access_oob: Label,
1860        unaligned_atomic: Label,
1861    ) -> Result<(), CompileError>;
1862    /// i64 atomic Exchange with u16
1863    #[allow(clippy::too_many_arguments)]
1864    fn i64_atomic_xchg_16u(
1865        &mut self,
1866        loc: Location<Self::GPR, Self::SIMD>,
1867        target: Location<Self::GPR, Self::SIMD>,
1868        memarg: &MemArg,
1869        ret: Location<Self::GPR, Self::SIMD>,
1870        need_check: bool,
1871        imported_memories: bool,
1872        offset: i32,
1873        heap_access_oob: Label,
1874        unaligned_atomic: Label,
1875    ) -> Result<(), CompileError>;
1876    /// i64 atomic Exchange with u32
1877    #[allow(clippy::too_many_arguments)]
1878    fn i64_atomic_xchg_32u(
1879        &mut self,
1880        loc: Location<Self::GPR, Self::SIMD>,
1881        target: Location<Self::GPR, Self::SIMD>,
1882        memarg: &MemArg,
1883        ret: Location<Self::GPR, Self::SIMD>,
1884        need_check: bool,
1885        imported_memories: bool,
1886        offset: i32,
1887        heap_access_oob: Label,
1888        unaligned_atomic: Label,
1889    ) -> Result<(), CompileError>;
    /// i64 atomic Compare and Exchange with i64
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_cmpxchg(
        &mut self,
        new: Location<Self::GPR, Self::SIMD>,
        cmp: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
1905    /// i64 atomic Compare and Exchange with u8
1906    #[allow(clippy::too_many_arguments)]
1907    fn i64_atomic_cmpxchg_8u(
1908        &mut self,
1909        new: Location<Self::GPR, Self::SIMD>,
1910        cmp: Location<Self::GPR, Self::SIMD>,
1911        target: Location<Self::GPR, Self::SIMD>,
1912        memarg: &MemArg,
1913        ret: Location<Self::GPR, Self::SIMD>,
1914        need_check: bool,
1915        imported_memories: bool,
1916        offset: i32,
1917        heap_access_oob: Label,
1918        unaligned_atomic: Label,
1919    ) -> Result<(), CompileError>;
1920    /// i64 atomic Compare and Exchange with u16
1921    #[allow(clippy::too_many_arguments)]
1922    fn i64_atomic_cmpxchg_16u(
1923        &mut self,
1924        new: Location<Self::GPR, Self::SIMD>,
1925        cmp: Location<Self::GPR, Self::SIMD>,
1926        target: Location<Self::GPR, Self::SIMD>,
1927        memarg: &MemArg,
1928        ret: Location<Self::GPR, Self::SIMD>,
1929        need_check: bool,
1930        imported_memories: bool,
1931        offset: i32,
1932        heap_access_oob: Label,
1933        unaligned_atomic: Label,
1934    ) -> Result<(), CompileError>;
1935    /// i64 atomic Compare and Exchange with u32
1936    #[allow(clippy::too_many_arguments)]
1937    fn i64_atomic_cmpxchg_32u(
1938        &mut self,
1939        new: Location<Self::GPR, Self::SIMD>,
1940        cmp: Location<Self::GPR, Self::SIMD>,
1941        target: Location<Self::GPR, Self::SIMD>,
1942        memarg: &MemArg,
1943        ret: Location<Self::GPR, Self::SIMD>,
1944        need_check: bool,
1945        imported_memories: bool,
1946        offset: i32,
1947        heap_access_oob: Label,
1948        unaligned_atomic: Label,
1949    ) -> Result<(), CompileError>;
1950
1951    /// load an F32
1952    #[allow(clippy::too_many_arguments)]
1953    fn f32_load(
1954        &mut self,
1955        addr: Location<Self::GPR, Self::SIMD>,
1956        memarg: &MemArg,
1957        ret: Location<Self::GPR, Self::SIMD>,
1958        need_check: bool,
1959        imported_memories: bool,
1960        offset: i32,
1961        heap_access_oob: Label,
1962        unaligned_atomic: Label,
1963    ) -> Result<(), CompileError>;
1964    /// f32 save
1965    #[allow(clippy::too_many_arguments)]
1966    fn f32_save(
1967        &mut self,
1968        value: Location<Self::GPR, Self::SIMD>,
1969        memarg: &MemArg,
1970        addr: Location<Self::GPR, Self::SIMD>,
1971        canonicalize: bool,
1972        need_check: bool,
1973        imported_memories: bool,
1974        offset: i32,
1975        heap_access_oob: Label,
1976        unaligned_atomic: Label,
1977    ) -> Result<(), CompileError>;
1978    /// load an F64
1979    #[allow(clippy::too_many_arguments)]
1980    fn f64_load(
1981        &mut self,
1982        addr: Location<Self::GPR, Self::SIMD>,
1983        memarg: &MemArg,
1984        ret: Location<Self::GPR, Self::SIMD>,
1985        need_check: bool,
1986        imported_memories: bool,
1987        offset: i32,
1988        heap_access_oob: Label,
1989        unaligned_atomic: Label,
1990    ) -> Result<(), CompileError>;
1991    /// f64 save
1992    #[allow(clippy::too_many_arguments)]
1993    fn f64_save(
1994        &mut self,
1995        value: Location<Self::GPR, Self::SIMD>,
1996        memarg: &MemArg,
1997        addr: Location<Self::GPR, Self::SIMD>,
1998        canonicalize: bool,
1999        need_check: bool,
2000        imported_memories: bool,
2001        offset: i32,
2002        heap_access_oob: Label,
2003        unaligned_atomic: Label,
2004    ) -> Result<(), CompileError>;
2005    /// Convert a F64 from I64, signed or unsigned
2006    fn convert_f64_i64(
2007        &mut self,
2008        loc: Location<Self::GPR, Self::SIMD>,
2009        signed: bool,
2010        ret: Location<Self::GPR, Self::SIMD>,
2011    ) -> Result<(), CompileError>;
2012    /// Convert a F64 from I32, signed or unsigned
2013    fn convert_f64_i32(
2014        &mut self,
2015        loc: Location<Self::GPR, Self::SIMD>,
2016        signed: bool,
2017        ret: Location<Self::GPR, Self::SIMD>,
2018    ) -> Result<(), CompileError>;
2019    /// Convert a F32 from I64, signed or unsigned
2020    fn convert_f32_i64(
2021        &mut self,
2022        loc: Location<Self::GPR, Self::SIMD>,
2023        signed: bool,
2024        ret: Location<Self::GPR, Self::SIMD>,
2025    ) -> Result<(), CompileError>;
2026    /// Convert a F32 from I32, signed or unsigned
2027    fn convert_f32_i32(
2028        &mut self,
2029        loc: Location<Self::GPR, Self::SIMD>,
2030        signed: bool,
2031        ret: Location<Self::GPR, Self::SIMD>,
2032    ) -> Result<(), CompileError>;
    /// Convert a F64 to I64, signed or unsigned, with or without saturation
    fn convert_i64_f64(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
        signed: bool,
        sat: bool,
    ) -> Result<(), CompileError>;
    /// Convert a F64 to I32, signed or unsigned, with or without saturation
    fn convert_i32_f64(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
        signed: bool,
        sat: bool,
    ) -> Result<(), CompileError>;
    /// Convert a F32 to I64, signed or unsigned, with or without saturation
    fn convert_i64_f32(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
        signed: bool,
        sat: bool,
    ) -> Result<(), CompileError>;
    /// Convert a F32 to I32, signed or unsigned, with or without saturation
    fn convert_i32_f32(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
        signed: bool,
        sat: bool,
    ) -> Result<(), CompileError>;
2065    /// Convert a F32 to F64
2066    fn convert_f64_f32(
2067        &mut self,
2068        loc: Location<Self::GPR, Self::SIMD>,
2069        ret: Location<Self::GPR, Self::SIMD>,
2070    ) -> Result<(), CompileError>;
2071    /// Convert a F64 to F32
2072    fn convert_f32_f64(
2073        &mut self,
2074        loc: Location<Self::GPR, Self::SIMD>,
2075        ret: Location<Self::GPR, Self::SIMD>,
2076    ) -> Result<(), CompileError>;
2077    /// Negate an F64
2078    fn f64_neg(
2079        &mut self,
2080        loc: Location<Self::GPR, Self::SIMD>,
2081        ret: Location<Self::GPR, Self::SIMD>,
2082    ) -> Result<(), CompileError>;
2083    /// Get the Absolute Value of an F64
2084    fn f64_abs(
2085        &mut self,
2086        loc: Location<Self::GPR, Self::SIMD>,
2087        ret: Location<Self::GPR, Self::SIMD>,
2088    ) -> Result<(), CompileError>;
2089    /// Copy sign from tmp1 Self::GPR to tmp2 Self::GPR
2090    fn emit_i64_copysign(&mut self, tmp1: Self::GPR, tmp2: Self::GPR) -> Result<(), CompileError>;
2091    /// Get the Square Root of an F64
2092    fn f64_sqrt(
2093        &mut self,
2094        loc: Location<Self::GPR, Self::SIMD>,
2095        ret: Location<Self::GPR, Self::SIMD>,
2096    ) -> Result<(), CompileError>;
2097    /// Trunc of an F64
2098    fn f64_trunc(
2099        &mut self,
2100        loc: Location<Self::GPR, Self::SIMD>,
2101        ret: Location<Self::GPR, Self::SIMD>,
2102    ) -> Result<(), CompileError>;
2103    /// Ceil of an F64
2104    fn f64_ceil(
2105        &mut self,
2106        loc: Location<Self::GPR, Self::SIMD>,
2107        ret: Location<Self::GPR, Self::SIMD>,
2108    ) -> Result<(), CompileError>;
2109    /// Floor of an F64
2110    fn f64_floor(
2111        &mut self,
2112        loc: Location<Self::GPR, Self::SIMD>,
2113        ret: Location<Self::GPR, Self::SIMD>,
2114    ) -> Result<(), CompileError>;
2115    /// Round at nearest int of an F64
2116    fn f64_nearest(
2117        &mut self,
2118        loc: Location<Self::GPR, Self::SIMD>,
2119        ret: Location<Self::GPR, Self::SIMD>,
2120    ) -> Result<(), CompileError>;
    /// Greater or Equal Compare 2 F64, result in a GPR
    fn f64_cmp_ge(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
2128    /// Greater Than Compare 2 F64, result in a GPR
2129    fn f64_cmp_gt(
2130        &mut self,
2131        loc_a: Location<Self::GPR, Self::SIMD>,
2132        loc_b: Location<Self::GPR, Self::SIMD>,
2133        ret: Location<Self::GPR, Self::SIMD>,
2134    ) -> Result<(), CompileError>;
    /// Less or Equal Compare 2 F64, result in a GPR
    fn f64_cmp_le(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
2142    /// Less Than Compare 2 F64, result in a GPR
2143    fn f64_cmp_lt(
2144        &mut self,
2145        loc_a: Location<Self::GPR, Self::SIMD>,
2146        loc_b: Location<Self::GPR, Self::SIMD>,
2147        ret: Location<Self::GPR, Self::SIMD>,
2148    ) -> Result<(), CompileError>;
2149    /// Not Equal Compare 2 F64, result in a GPR
2150    fn f64_cmp_ne(
2151        &mut self,
2152        loc_a: Location<Self::GPR, Self::SIMD>,
2153        loc_b: Location<Self::GPR, Self::SIMD>,
2154        ret: Location<Self::GPR, Self::SIMD>,
2155    ) -> Result<(), CompileError>;
2156    /// Equal Compare 2 F64, result in a GPR
2157    fn f64_cmp_eq(
2158        &mut self,
2159        loc_a: Location<Self::GPR, Self::SIMD>,
2160        loc_b: Location<Self::GPR, Self::SIMD>,
2161        ret: Location<Self::GPR, Self::SIMD>,
2162    ) -> Result<(), CompileError>;
2163    /// get Min for 2 F64 values
2164    fn f64_min(
2165        &mut self,
2166        loc_a: Location<Self::GPR, Self::SIMD>,
2167        loc_b: Location<Self::GPR, Self::SIMD>,
2168        ret: Location<Self::GPR, Self::SIMD>,
2169    ) -> Result<(), CompileError>;
2170    /// get Max for 2 F64 values
2171    fn f64_max(
2172        &mut self,
2173        loc_a: Location<Self::GPR, Self::SIMD>,
2174        loc_b: Location<Self::GPR, Self::SIMD>,
2175        ret: Location<Self::GPR, Self::SIMD>,
2176    ) -> Result<(), CompileError>;
2177    /// Add 2 F64 values
2178    fn f64_add(
2179        &mut self,
2180        loc_a: Location<Self::GPR, Self::SIMD>,
2181        loc_b: Location<Self::GPR, Self::SIMD>,
2182        ret: Location<Self::GPR, Self::SIMD>,
2183    ) -> Result<(), CompileError>;
2184    /// Sub 2 F64 values
2185    fn f64_sub(
2186        &mut self,
2187        loc_a: Location<Self::GPR, Self::SIMD>,
2188        loc_b: Location<Self::GPR, Self::SIMD>,
2189        ret: Location<Self::GPR, Self::SIMD>,
2190    ) -> Result<(), CompileError>;
2191    /// Multiply 2 F64 values
2192    fn f64_mul(
2193        &mut self,
2194        loc_a: Location<Self::GPR, Self::SIMD>,
2195        loc_b: Location<Self::GPR, Self::SIMD>,
2196        ret: Location<Self::GPR, Self::SIMD>,
2197    ) -> Result<(), CompileError>;
2198    /// Divide 2 F64 values
2199    fn f64_div(
2200        &mut self,
2201        loc_a: Location<Self::GPR, Self::SIMD>,
2202        loc_b: Location<Self::GPR, Self::SIMD>,
2203        ret: Location<Self::GPR, Self::SIMD>,
2204    ) -> Result<(), CompileError>;
2205    /// Negate an F32
2206    fn f32_neg(
2207        &mut self,
2208        loc: Location<Self::GPR, Self::SIMD>,
2209        ret: Location<Self::GPR, Self::SIMD>,
2210    ) -> Result<(), CompileError>;
2211    /// Get the Absolute Value of an F32
2212    fn f32_abs(
2213        &mut self,
2214        loc: Location<Self::GPR, Self::SIMD>,
2215        ret: Location<Self::GPR, Self::SIMD>,
2216    ) -> Result<(), CompileError>;
2217    /// Copy sign from tmp1 Self::GPR to tmp2 Self::GPR
2218    fn emit_i32_copysign(&mut self, tmp1: Self::GPR, tmp2: Self::GPR) -> Result<(), CompileError>;
2219    /// Get the Square Root of an F32
2220    fn f32_sqrt(
2221        &mut self,
2222        loc: Location<Self::GPR, Self::SIMD>,
2223        ret: Location<Self::GPR, Self::SIMD>,
2224    ) -> Result<(), CompileError>;
2225    /// Trunc of an F32
2226    fn f32_trunc(
2227        &mut self,
2228        loc: Location<Self::GPR, Self::SIMD>,
2229        ret: Location<Self::GPR, Self::SIMD>,
2230    ) -> Result<(), CompileError>;
2231    /// Ceil of an F32
2232    fn f32_ceil(
2233        &mut self,
2234        loc: Location<Self::GPR, Self::SIMD>,
2235        ret: Location<Self::GPR, Self::SIMD>,
2236    ) -> Result<(), CompileError>;
2237    /// Floor of an F32
2238    fn f32_floor(
2239        &mut self,
2240        loc: Location<Self::GPR, Self::SIMD>,
2241        ret: Location<Self::GPR, Self::SIMD>,
2242    ) -> Result<(), CompileError>;
2243    /// Round at nearest int of an F32
2244    fn f32_nearest(
2245        &mut self,
2246        loc: Location<Self::GPR, Self::SIMD>,
2247        ret: Location<Self::GPR, Self::SIMD>,
2248    ) -> Result<(), CompileError>;
    /// Greater or Equal Compare 2 F32, result in a GPR
    fn f32_cmp_ge(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
2256    /// Greater Than Compare 2 F32, result in a GPR
2257    fn f32_cmp_gt(
2258        &mut self,
2259        loc_a: Location<Self::GPR, Self::SIMD>,
2260        loc_b: Location<Self::GPR, Self::SIMD>,
2261        ret: Location<Self::GPR, Self::SIMD>,
2262    ) -> Result<(), CompileError>;
    /// Less or Equal Compare 2 F32, result in a GPR
    fn f32_cmp_le(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
2270    /// Less Than Compare 2 F32, result in a GPR
2271    fn f32_cmp_lt(
2272        &mut self,
2273        loc_a: Location<Self::GPR, Self::SIMD>,
2274        loc_b: Location<Self::GPR, Self::SIMD>,
2275        ret: Location<Self::GPR, Self::SIMD>,
2276    ) -> Result<(), CompileError>;
2277    /// Not Equal Compare 2 F32, result in a GPR
2278    fn f32_cmp_ne(
2279        &mut self,
2280        loc_a: Location<Self::GPR, Self::SIMD>,
2281        loc_b: Location<Self::GPR, Self::SIMD>,
2282        ret: Location<Self::GPR, Self::SIMD>,
2283    ) -> Result<(), CompileError>;
2284    /// Equal Compare 2 F32, result in a GPR
2285    fn f32_cmp_eq(
2286        &mut self,
2287        loc_a: Location<Self::GPR, Self::SIMD>,
2288        loc_b: Location<Self::GPR, Self::SIMD>,
2289        ret: Location<Self::GPR, Self::SIMD>,
2290    ) -> Result<(), CompileError>;
2291    /// get Min for 2 F32 values
2292    fn f32_min(
2293        &mut self,
2294        loc_a: Location<Self::GPR, Self::SIMD>,
2295        loc_b: Location<Self::GPR, Self::SIMD>,
2296        ret: Location<Self::GPR, Self::SIMD>,
2297    ) -> Result<(), CompileError>;
2298    /// get Max for 2 F32 values
2299    fn f32_max(
2300        &mut self,
2301        loc_a: Location<Self::GPR, Self::SIMD>,
2302        loc_b: Location<Self::GPR, Self::SIMD>,
2303        ret: Location<Self::GPR, Self::SIMD>,
2304    ) -> Result<(), CompileError>;
2305    /// Add 2 F32 values
2306    fn f32_add(
2307        &mut self,
2308        loc_a: Location<Self::GPR, Self::SIMD>,
2309        loc_b: Location<Self::GPR, Self::SIMD>,
2310        ret: Location<Self::GPR, Self::SIMD>,
2311    ) -> Result<(), CompileError>;
2312    /// Sub 2 F32 values
2313    fn f32_sub(
2314        &mut self,
2315        loc_a: Location<Self::GPR, Self::SIMD>,
2316        loc_b: Location<Self::GPR, Self::SIMD>,
2317        ret: Location<Self::GPR, Self::SIMD>,
2318    ) -> Result<(), CompileError>;
2319    /// Multiply 2 F32 values
2320    fn f32_mul(
2321        &mut self,
2322        loc_a: Location<Self::GPR, Self::SIMD>,
2323        loc_b: Location<Self::GPR, Self::SIMD>,
2324        ret: Location<Self::GPR, Self::SIMD>,
2325    ) -> Result<(), CompileError>;
2326    /// Divide 2 F32 values
2327    fn f32_div(
2328        &mut self,
2329        loc_a: Location<Self::GPR, Self::SIMD>,
2330        loc_b: Location<Self::GPR, Self::SIMD>,
2331        ret: Location<Self::GPR, Self::SIMD>,
2332    ) -> Result<(), CompileError>;
2333
2334    /// Standard function Trampoline generation
2335    fn gen_std_trampoline(
2336        &self,
2337        sig: &FunctionType,
2338        calling_convention: CallingConvention,
2339    ) -> Result<FunctionBody, CompileError>;
2340    /// Generates dynamic import function call trampoline for a function type.
2341    fn gen_std_dynamic_import_trampoline(
2342        &self,
2343        vmoffsets: &VMOffsets,
2344        sig: &FunctionType,
2345        calling_convention: CallingConvention,
2346    ) -> Result<FunctionBody, CompileError>;
2347    /// Singlepass calls import functions through a trampoline.
2348    fn gen_import_call_trampoline(
2349        &self,
2350        vmoffsets: &VMOffsets,
2351        index: FunctionIndex,
2352        sig: &FunctionType,
2353        calling_convention: CallingConvention,
2354    ) -> Result<CustomSection, CompileError>;
2355    /// generate eh_frame instruction (or None if not possible / supported)
2356    fn gen_dwarf_unwind_info(&mut self, code_len: usize) -> Option<UnwindInstructions>;
2357    /// generate Windows unwind instructions (or None if not possible / supported)
2358    fn gen_windows_unwind_info(&mut self, code_len: usize) -> Option<Vec<u8>>;
2359}
2360
2361/// Standard entry trampoline generation
2362pub fn gen_std_trampoline(
2363    sig: &FunctionType,
2364    target: &Target,
2365    calling_convention: CallingConvention,
2366) -> Result<FunctionBody, CompileError> {
2367    match target.triple().architecture {
2368        Architecture::X86_64 => {
2369            let machine = MachineX86_64::new(Some(target.clone()))?;
2370            machine.gen_std_trampoline(sig, calling_convention)
2371        }
2372        Architecture::Aarch64(_) => {
2373            let machine = MachineARM64::new(Some(target.clone()));
2374            machine.gen_std_trampoline(sig, calling_convention)
2375        }
2376        Architecture::Riscv64(_) => {
2377            let machine = MachineRiscv::new(Some(target.clone()))?;
2378            machine.gen_std_trampoline(sig, calling_convention)
2379        }
2380        _ => Err(CompileError::UnsupportedTarget(
2381            "singlepass unimplemented arch for gen_std_trampoline".to_owned(),
2382        )),
2383    }
2384}
2385
2386/// Generates dynamic import function call trampoline for a function type.
2387pub fn gen_std_dynamic_import_trampoline(
2388    vmoffsets: &VMOffsets,
2389    sig: &FunctionType,
2390    target: &Target,
2391    calling_convention: CallingConvention,
2392) -> Result<FunctionBody, CompileError> {
2393    match target.triple().architecture {
2394        Architecture::X86_64 => {
2395            let machine = MachineX86_64::new(Some(target.clone()))?;
2396            machine.gen_std_dynamic_import_trampoline(vmoffsets, sig, calling_convention)
2397        }
2398        Architecture::Aarch64(_) => {
2399            let machine = MachineARM64::new(Some(target.clone()));
2400            machine.gen_std_dynamic_import_trampoline(vmoffsets, sig, calling_convention)
2401        }
2402        Architecture::Riscv64(_) => {
2403            let machine = MachineRiscv::new(Some(target.clone()))?;
2404            machine.gen_std_dynamic_import_trampoline(vmoffsets, sig, calling_convention)
2405        }
2406        _ => Err(CompileError::UnsupportedTarget(
2407            "singlepass unimplemented arch for gen_std_dynamic_import_trampoline".to_owned(),
2408        )),
2409    }
2410}
2411/// Singlepass calls import functions through a trampoline.
2412pub fn gen_import_call_trampoline(
2413    vmoffsets: &VMOffsets,
2414    index: FunctionIndex,
2415    sig: &FunctionType,
2416    target: &Target,
2417    calling_convention: CallingConvention,
2418) -> Result<CustomSection, CompileError> {
2419    match target.triple().architecture {
2420        Architecture::X86_64 => {
2421            let machine = MachineX86_64::new(Some(target.clone()))?;
2422            machine.gen_import_call_trampoline(vmoffsets, index, sig, calling_convention)
2423        }
2424        Architecture::Aarch64(_) => {
2425            let machine = MachineARM64::new(Some(target.clone()));
2426            machine.gen_import_call_trampoline(vmoffsets, index, sig, calling_convention)
2427        }
2428        Architecture::Riscv64(_) => {
2429            let machine = MachineRiscv::new(Some(target.clone()))?;
2430            machine.gen_import_call_trampoline(vmoffsets, index, sig, calling_convention)
2431        }
2432        _ => Err(CompileError::UnsupportedTarget(
2433            "singlepass unimplemented arch for gen_import_call_trampoline".to_owned(),
2434        )),
2435    }
2436}
2437
// Constants for the bounds of truncation operations. These are the least or
// greatest exact floats in either f32 or f64 representation less-than (for
// least) or greater-than (for greatest) the i32 or i64 or u32 or u64
// min (for least) or max (for greatest), when rounding towards zero.
//
// In other words: a float value `x` truncates to a representable integer of
// the given type iff GE… < x < LE… — comparisons against these exact
// constants are how the out-of-range (trap/saturate) cases are detected.

/// Greatest Exact Float (32 bits) less-than i32::MIN when rounding towards zero.
pub const GEF32_LT_I32_MIN: f32 = -2147483904.0;
/// Least Exact Float (32 bits) greater-than i32::MAX when rounding towards zero.
pub const LEF32_GT_I32_MAX: f32 = 2147483648.0;
/// Greatest Exact Float (32 bits) less-than i64::MIN when rounding towards zero.
pub const GEF32_LT_I64_MIN: f32 = -9223373136366403584.0;
/// Least Exact Float (32 bits) greater-than i64::MAX when rounding towards zero.
pub const LEF32_GT_I64_MAX: f32 = 9223372036854775808.0;
/// Greatest Exact Float (32 bits) less-than u32::MIN when rounding towards zero.
pub const GEF32_LT_U32_MIN: f32 = -1.0;
/// Least Exact Float (32 bits) greater-than u32::MAX when rounding towards zero.
pub const LEF32_GT_U32_MAX: f32 = 4294967296.0;
/// Greatest Exact Float (32 bits) less-than u64::MIN when rounding towards zero.
pub const GEF32_LT_U64_MIN: f32 = -1.0;
/// Least Exact Float (32 bits) greater-than u64::MAX when rounding towards zero.
pub const LEF32_GT_U64_MAX: f32 = 18446744073709551616.0;

/// Greatest Exact Float (64 bits) less-than i32::MIN when rounding towards zero.
pub const GEF64_LT_I32_MIN: f64 = -2147483649.0;
/// Least Exact Float (64 bits) greater-than i32::MAX when rounding towards zero.
pub const LEF64_GT_I32_MAX: f64 = 2147483648.0;
/// Greatest Exact Float (64 bits) less-than i64::MIN when rounding towards zero.
pub const GEF64_LT_I64_MIN: f64 = -9223372036854777856.0;
/// Least Exact Float (64 bits) greater-than i64::MAX when rounding towards zero.
pub const LEF64_GT_I64_MAX: f64 = 9223372036854775808.0;
/// Greatest Exact Float (64 bits) less-than u32::MIN when rounding towards zero.
pub const GEF64_LT_U32_MIN: f64 = -1.0;
/// Least Exact Float (64 bits) greater-than u32::MAX when rounding towards zero.
pub const LEF64_GT_U32_MAX: f64 = 4294967296.0;
/// Greatest Exact Float (64 bits) less-than u64::MIN when rounding towards zero.
pub const GEF64_LT_U64_MIN: f64 = -1.0;
/// Least Exact Float (64 bits) greater-than u64::MAX when rounding towards zero.
pub const LEF64_GT_U64_MAX: f64 = 18446744073709551616.0;