// File: wasmer_compiler_singlepass/machine.rs

1use crate::{
2    common_decl::*,
3    location::{Location, Reg},
4    machine_arm64::MachineARM64,
5    machine_x64::MachineX86_64,
6    unwind::UnwindInstructions,
7};
8use dynasmrt::{AssemblyOffset, DynamicLabel};
9use std::{collections::BTreeMap, fmt::Debug};
10use wasmer_compiler::{
11    types::{
12        address_map::InstructionAddressMap,
13        function::FunctionBody,
14        relocation::{Relocation, RelocationTarget},
15        section::CustomSection,
16    },
17    wasmparser::{MemArg, ValType as WpType},
18};
19use wasmer_types::{
20    CompileError, FunctionIndex, FunctionType, TrapCode, TrapInformation, VMOffsets,
21    target::{Architecture, CallingConvention, Target},
22};
/// A label in the generated assembly, usable as a jump/branch target.
pub type Label = DynamicLabel;
/// A byte offset into the generated assembly.
pub type Offset = AssemblyOffset;

/// An immediate constant value known at compile time.
///
/// All variants hold `Copy` scalars, so the enum itself is `Copy`;
/// `Debug` is derived so values can appear in diagnostics.
#[allow(dead_code)]
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum Value {
    /// 8-bit signed integer immediate.
    I8(i8),
    /// 32-bit signed integer immediate.
    I32(i32),
    /// 64-bit signed integer immediate.
    I64(i64),
    /// 32-bit float immediate.
    F32(f32),
    /// 64-bit float immediate.
    F64(f64),
}

/// Early-returns from the enclosing function with a
/// `CompileError::Codegen` built from `format!`-style arguments.
///
/// The enclosing function must return `Result<_, CompileError>`,
/// and `CompileError` must be in scope at the call site.
#[macro_export]
macro_rules! codegen_error {
    ($($arg:tt)*) => {return Err(CompileError::Codegen(format!($($arg)*)))}
}

41#[allow(unused)]
42pub trait MaybeImmediate {
43    fn imm_value(&self) -> Option<Value>;
44    fn is_imm(&self) -> bool {
45        self.imm_value().is_some()
46    }
47}
48
/// A trap table for a `RunnableModuleInfo`.
#[derive(Clone, Debug, Default)]
pub struct TrapTable {
    /// Mappings from byte offsets in the generated machine code to the
    /// corresponding trap code.
    pub offset_to_code: BTreeMap<usize, TrapCode>,
}

/// Size in bytes of a native memory page.
///
/// All supported machines appear to use pages of this size, so this is
/// not per-architecture for now.
pub const NATIVE_PAGE_SIZE: usize = 4096;

/// Newtype wrapping an offset into the machine stack.
pub struct MachineStackOffset(pub usize);

/// Unsigned comparison conditions used when emitting conditional jumps
/// (see `Machine::jmp_on_condition`).
#[allow(dead_code)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum UnsignedCondition {
    /// Operands are equal.
    Equal,
    /// Operands are not equal.
    NotEqual,
    /// First operand is strictly greater (unsigned).
    Above,
    /// First operand is greater or equal (unsigned).
    AboveEqual,
    /// First operand is strictly less (unsigned).
    Below,
    /// First operand is less or equal (unsigned).
    BelowEqual,
}

71#[allow(unused)]
72pub trait Machine {
73    type GPR: Copy + Eq + Debug + Reg;
74    type SIMD: Copy + Eq + Debug + Reg;
75    /// Get current assembler offset
76    fn assembler_get_offset(&self) -> Offset;
77    /// Convert from a GPR register to index register
78    fn index_from_gpr(&self, x: Self::GPR) -> RegisterIndex;
79    /// Convert from an SIMD register
80    fn index_from_simd(&self, x: Self::SIMD) -> RegisterIndex;
81    /// Get the GPR that hold vmctx
82    fn get_vmctx_reg(&self) -> Self::GPR;
83    /// Picks an unused general purpose register for local/stack/argument use.
84    ///
85    /// This method does not mark the register as used
86    fn pick_gpr(&self) -> Option<Self::GPR>;
87    /// Picks an unused general purpose register for internal temporary use.
88    ///
89    /// This method does not mark the register as used
90    fn pick_temp_gpr(&self) -> Option<Self::GPR>;
91    /// Get all used GPR
92    fn get_used_gprs(&self) -> Vec<Self::GPR>;
93    /// Get all used SIMD regs
94    fn get_used_simd(&self) -> Vec<Self::SIMD>;
    /// Picks an unused general purpose register and marks it as used
    fn acquire_temp_gpr(&mut self) -> Option<Self::GPR>;
97    /// Releases a temporary GPR.
98    fn release_gpr(&mut self, gpr: Self::GPR);
99    /// Specify that a given register is in use.
100    fn reserve_unused_temp_gpr(&mut self, gpr: Self::GPR) -> Self::GPR;
101    /// reserve a GPR
102    fn reserve_gpr(&mut self, gpr: Self::GPR);
103    /// Push used gpr to the stack. Return the bytes taken on the stack
104    fn push_used_gpr(&mut self, grps: &[Self::GPR]) -> Result<usize, CompileError>;
105    /// Pop used gpr to the stack
106    fn pop_used_gpr(&mut self, grps: &[Self::GPR]) -> Result<(), CompileError>;
107    /// Picks an unused SIMD register.
108    ///
109    /// This method does not mark the register as used
110    fn pick_simd(&self) -> Option<Self::SIMD>;
111    /// Picks an unused SIMD register for internal temporary use.
112    ///
113    /// This method does not mark the register as used
114    fn pick_temp_simd(&self) -> Option<Self::SIMD>;
115    /// Acquires a temporary XMM register.
116    fn acquire_temp_simd(&mut self) -> Option<Self::SIMD>;
117    /// reserve a SIMD register
118    fn reserve_simd(&mut self, simd: Self::SIMD);
119    /// Releases a temporary XMM register.
120    fn release_simd(&mut self, simd: Self::SIMD);
121    /// Push used simd regs to the stack. Return bytes taken on the stack
122    fn push_used_simd(&mut self, simds: &[Self::SIMD]) -> Result<usize, CompileError>;
123    /// Pop used simd regs to the stack
124    fn pop_used_simd(&mut self, simds: &[Self::SIMD]) -> Result<(), CompileError>;
    /// Return a rounded stack adjustment value (must be a multiple of 16 bytes on ARM64, for example)
    fn round_stack_adjust(&self, value: usize) -> usize;
127    /// Set the source location of the Wasm to the given offset.
128    fn set_srcloc(&mut self, offset: u32);
129    /// Marks each address in the code range emitted by `f` with the trap code `code`.
130    fn mark_address_range_with_trap_code(&mut self, code: TrapCode, begin: usize, end: usize);
131    /// Marks one address as trappable with trap code `code`.
132    fn mark_address_with_trap_code(&mut self, code: TrapCode);
133    /// Marks the instruction as trappable with trap code `code`. return "begin" offset
134    fn mark_instruction_with_trap_code(&mut self, code: TrapCode) -> usize;
135    /// Pushes the instruction to the address map, calculating the offset from a
136    /// provided beginning address.
137    fn mark_instruction_address_end(&mut self, begin: usize);
138    /// Insert a StackOverflow (at offset 0)
139    fn insert_stackoverflow(&mut self);
140    /// Get all current TrapInformation
141    fn collect_trap_information(&self) -> Vec<TrapInformation>;
    /// Get the address map of all instructions
    fn instructions_address_map(&self) -> Vec<InstructionAddressMap>;
144    /// Memory location for a local on the stack
145    /// Like Location::Memory(GPR::RBP, -(self.stack_offset.0 as i32)) for x86_64
146    fn local_on_stack(&mut self, stack_offset: i32) -> Location<Self::GPR, Self::SIMD>;
147    /// Adjust stack for locals
148    /// Like assembler.emit_sub(Size::S64, Location::Imm32(delta_stack_offset as u32), Location::GPR(GPR::RSP))
149    fn adjust_stack(&mut self, delta_stack_offset: u32) -> Result<(), CompileError>;
150    /// restore stack
151    /// Like assembler.emit_add(Size::S64, Location::Imm32(delta_stack_offset as u32), Location::GPR(GPR::RSP))
152    fn restore_stack(&mut self, delta_stack_offset: u32) -> Result<(), CompileError>;
153    /// Pop stack of locals
154    /// Like assembler.emit_add(Size::S64, Location::Imm32(delta_stack_offset as u32), Location::GPR(GPR::RSP))
155    fn pop_stack_locals(&mut self, delta_stack_offset: u32) -> Result<(), CompileError>;
    /// Zero a location, using the given operand `size`
    fn zero_location(
        &mut self,
        size: Size,
        location: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
162    /// GPR Reg used for local pointer on the stack
163    fn local_pointer(&self) -> Self::GPR;
164    /// push a value on the stack for a native call
165    fn move_location_for_native(
166        &mut self,
167        size: Size,
168        loc: Location<Self::GPR, Self::SIMD>,
169        dest: Location<Self::GPR, Self::SIMD>,
170    ) -> Result<(), CompileError>;
171    /// Determine whether a local should be allocated on the stack.
172    fn is_local_on_stack(&self, idx: usize) -> bool;
173    /// Determine a local's location.
174    fn get_local_location(
175        &self,
176        idx: usize,
177        callee_saved_regs_size: usize,
178    ) -> Location<Self::GPR, Self::SIMD>;
179    /// Move a local to the stack
180    /// Like emit_mov(Size::S64, location, Location::Memory(GPR::RBP, -(self.stack_offset.0 as i32)));
181    fn move_local(
182        &mut self,
183        stack_offset: i32,
184        location: Location<Self::GPR, Self::SIMD>,
185    ) -> Result<(), CompileError>;
186    /// List of register to save, depending on the CallingConvention
187    fn list_to_save(
188        &self,
189        calling_convention: CallingConvention,
190    ) -> Vec<Location<Self::GPR, Self::SIMD>>;
191    /// Get param location (to build a call, using SP for stack args)
192    fn get_param_location(
193        &self,
194        idx: usize,
195        sz: Size,
196        stack_offset: &mut usize,
197        calling_convention: CallingConvention,
198    ) -> Location<Self::GPR, Self::SIMD>;
199    /// Get call param location (from a call, using FP for stack args)
200    fn get_call_param_location(
201        &self,
202        idx: usize,
203        sz: Size,
204        stack_offset: &mut usize,
205        calling_convention: CallingConvention,
206    ) -> Location<Self::GPR, Self::SIMD>;
207    /// Get simple param location
208    fn get_simple_param_location(
209        &self,
210        idx: usize,
211        calling_convention: CallingConvention,
212    ) -> Location<Self::GPR, Self::SIMD>;
213    /// move a location to another
214    fn move_location(
215        &mut self,
216        size: Size,
217        source: Location<Self::GPR, Self::SIMD>,
218        dest: Location<Self::GPR, Self::SIMD>,
219    ) -> Result<(), CompileError>;
220    /// move a location to another, with zero or sign extension
221    fn move_location_extend(
222        &mut self,
223        size_val: Size,
224        signed: bool,
225        source: Location<Self::GPR, Self::SIMD>,
226        size_op: Size,
227        dest: Location<Self::GPR, Self::SIMD>,
228    ) -> Result<(), CompileError>;
229    /// Load a memory value to a register, zero extending to 64bits.
230    /// Panic if gpr is not a Location::GPR or if mem is not a Memory(2)
231    fn load_address(
232        &mut self,
233        size: Size,
234        gpr: Location<Self::GPR, Self::SIMD>,
235        mem: Location<Self::GPR, Self::SIMD>,
236    ) -> Result<(), CompileError>;
237    /// Init the stack loc counter
238    fn init_stack_loc(
239        &mut self,
240        init_stack_loc_cnt: u64,
241        last_stack_loc: Location<Self::GPR, Self::SIMD>,
242    ) -> Result<(), CompileError>;
243    /// Restore save_area
244    fn restore_saved_area(&mut self, saved_area_offset: i32) -> Result<(), CompileError>;
245    /// Pop a location
246    fn pop_location(
247        &mut self,
248        location: Location<Self::GPR, Self::SIMD>,
249    ) -> Result<(), CompileError>;
250    /// Create a new `MachineState` with default values.
251    fn new_machine_state(&self) -> MachineState;
252
253    /// Finalize the assembler
254    fn assembler_finalize(self) -> Result<Vec<u8>, CompileError>;
255
256    /// get_offset of Assembler
257    fn get_offset(&self) -> Offset;
258
259    /// finalize a function
260    fn finalize_function(&mut self) -> Result<(), CompileError>;
261
262    /// emit native function prolog (depending on the calling Convention, like "PUSH RBP / MOV RSP, RBP")
263    fn emit_function_prolog(&mut self) -> Result<(), CompileError>;
264    /// emit native function epilog (depending on the calling Convention, like "MOV RBP, RSP / POP RBP")
265    fn emit_function_epilog(&mut self) -> Result<(), CompileError>;
    /// handle return value, with optional canonicalization if wanted
    fn emit_function_return_value(
        &mut self,
        ty: WpType,
        cannonicalize: bool,
        loc: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
273    /// Handle copy to SIMD register from ret value (if needed by the arch/calling convention)
274    fn emit_function_return_float(&mut self) -> Result<(), CompileError>;
275    /// Is NaN canonicalization supported
276    fn arch_supports_canonicalize_nan(&self) -> bool;
    /// Canonicalize a NaN (or panic if not supported)
    fn canonicalize_nan(
        &mut self,
        sz: Size,
        input: Location<Self::GPR, Self::SIMD>,
        output: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
284
285    /// emit an Illegal Opcode, associated with a trapcode
286    fn emit_illegal_op(&mut self, trp: TrapCode) -> Result<(), CompileError>;
287    /// create a new label
288    fn get_label(&mut self) -> Label;
289    /// emit a label
290    fn emit_label(&mut self, label: Label) -> Result<(), CompileError>;
291
292    /// get the gpr use for call. like RAX on x86_64
293    fn get_grp_for_call(&self) -> Self::GPR;
294    /// Emit a call using the value in register
295    fn emit_call_register(&mut self, register: Self::GPR) -> Result<(), CompileError>;
296    /// Emit a call to a label
297    fn emit_call_label(&mut self, label: Label) -> Result<(), CompileError>;
    /// Whether a trampoline is needed for indirect calls on this architecture
    fn arch_requires_indirect_call_trampoline(&self) -> bool;
300    /// indirect call with trampoline
301    fn arch_emit_indirect_call_with_trampoline(
302        &mut self,
303        location: Location<Self::GPR, Self::SIMD>,
304    ) -> Result<(), CompileError>;
305    /// emit a call to a location
306    fn emit_call_location(
307        &mut self,
308        location: Location<Self::GPR, Self::SIMD>,
309    ) -> Result<(), CompileError>;
310    /// get the gpr for the return of generic values
311    fn get_gpr_for_ret(&self) -> Self::GPR;
312    /// get the simd for the return of float/double values
313    fn get_simd_for_ret(&self) -> Self::SIMD;
314
315    /// Emit a debug breakpoint
316    fn emit_debug_breakpoint(&mut self) -> Result<(), CompileError>;
317
318    /// load the address of a memory location (will panic if src is not a memory)
319    /// like LEA opcode on x86_64
320    fn location_address(
321        &mut self,
322        size: Size,
323        source: Location<Self::GPR, Self::SIMD>,
324        dest: Location<Self::GPR, Self::SIMD>,
325    ) -> Result<(), CompileError>;
326
327    /// And src & dst -> dst (with or without flags)
328    fn location_and(
329        &mut self,
330        size: Size,
331        source: Location<Self::GPR, Self::SIMD>,
332        dest: Location<Self::GPR, Self::SIMD>,
333        flags: bool,
334    ) -> Result<(), CompileError>;
335    /// Xor src & dst -> dst (with or without flags)
336    fn location_xor(
337        &mut self,
338        size: Size,
339        source: Location<Self::GPR, Self::SIMD>,
340        dest: Location<Self::GPR, Self::SIMD>,
341        flags: bool,
342    ) -> Result<(), CompileError>;
343    /// Or src & dst -> dst (with or without flags)
344    fn location_or(
345        &mut self,
346        size: Size,
347        source: Location<Self::GPR, Self::SIMD>,
348        dest: Location<Self::GPR, Self::SIMD>,
349        flags: bool,
350    ) -> Result<(), CompileError>;
351
352    /// Add src+dst -> dst (with or without flags)
353    fn location_add(
354        &mut self,
355        size: Size,
356        source: Location<Self::GPR, Self::SIMD>,
357        dest: Location<Self::GPR, Self::SIMD>,
358        flags: bool,
359    ) -> Result<(), CompileError>;
360    /// Sub dst-src -> dst (with or without flags)
361    fn location_sub(
362        &mut self,
363        size: Size,
364        source: Location<Self::GPR, Self::SIMD>,
365        dest: Location<Self::GPR, Self::SIMD>,
366        flags: bool,
367    ) -> Result<(), CompileError>;
368    /// -src -> dst
369    fn location_neg(
370        &mut self,
371        size_val: Size, // size of src
372        signed: bool,
373        source: Location<Self::GPR, Self::SIMD>,
374        size_op: Size,
375        dest: Location<Self::GPR, Self::SIMD>,
376    ) -> Result<(), CompileError>;
377
378    /// Cmp src - dst and set flags
379    fn location_cmp(
380        &mut self,
381        size: Size,
382        source: Location<Self::GPR, Self::SIMD>,
383        dest: Location<Self::GPR, Self::SIMD>,
384    ) -> Result<(), CompileError>;
385    /// Test src & dst and set flags
386    fn location_test(
387        &mut self,
388        size: Size,
389        source: Location<Self::GPR, Self::SIMD>,
390        dest: Location<Self::GPR, Self::SIMD>,
391    ) -> Result<(), CompileError>;
392
    /// Unconditional jump to `label`
    fn jmp_unconditionnal(&mut self, label: Label) -> Result<(), CompileError>;
395
396    /// jmp to label if the provided condition is true (when comparing source and dest)
397    fn jmp_on_condition(
398        &mut self,
399        cond: UnsignedCondition,
400        size: Size,
401        source: Location<Self::GPR, Self::SIMD>,
402        dest: Location<Self::GPR, Self::SIMD>,
403        label: Label,
404    ) -> Result<(), CompileError>;
405
    /// Jump using a jump table at `label`, with `cond` as the index
    fn emit_jmp_to_jumptable(
        &mut self,
        label: Label,
        cond: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
412
413    /// Align for Loop (may do nothing, depending on the arch)
414    fn align_for_loop(&mut self) -> Result<(), CompileError>;
415
416    /// ret (from a Call)
417    fn emit_ret(&mut self) -> Result<(), CompileError>;
418
419    /// Stack push of a location
420    fn emit_push(
421        &mut self,
422        size: Size,
423        loc: Location<Self::GPR, Self::SIMD>,
424    ) -> Result<(), CompileError>;
425    /// Stack pop of a location
426    fn emit_pop(
427        &mut self,
428        size: Size,
429        loc: Location<Self::GPR, Self::SIMD>,
430    ) -> Result<(), CompileError>;
431    /// relaxed mov: move from anywhere to anywhere
432    fn emit_relaxed_mov(
433        &mut self,
434        sz: Size,
435        src: Location<Self::GPR, Self::SIMD>,
436        dst: Location<Self::GPR, Self::SIMD>,
437    ) -> Result<(), CompileError>;
438    /// relaxed cmp: compare from anywhere and anywhere
439    fn emit_relaxed_cmp(
440        &mut self,
441        sz: Size,
442        src: Location<Self::GPR, Self::SIMD>,
443        dst: Location<Self::GPR, Self::SIMD>,
444    ) -> Result<(), CompileError>;
445    /// Emit a memory fence. Can be nothing for x86_64 or a DMB on ARM64 for example
446    fn emit_memory_fence(&mut self) -> Result<(), CompileError>;
447    /// relaxed move with zero extension
448    fn emit_relaxed_zero_extension(
449        &mut self,
450        sz_src: Size,
451        src: Location<Self::GPR, Self::SIMD>,
452        sz_dst: Size,
453        dst: Location<Self::GPR, Self::SIMD>,
454    ) -> Result<(), CompileError>;
455    /// relaxed move with sign extension
456    fn emit_relaxed_sign_extension(
457        &mut self,
458        sz_src: Size,
459        src: Location<Self::GPR, Self::SIMD>,
460        sz_dst: Size,
461        dst: Location<Self::GPR, Self::SIMD>,
462    ) -> Result<(), CompileError>;
463    /// Multiply location with immediate
464    fn emit_imul_imm32(
465        &mut self,
466        size: Size,
467        imm32: u32,
468        gpr: Self::GPR,
469    ) -> Result<(), CompileError>;
470    /// Add with location directly from the stack
471    fn emit_binop_add32(
472        &mut self,
473        loc_a: Location<Self::GPR, Self::SIMD>,
474        loc_b: Location<Self::GPR, Self::SIMD>,
475        ret: Location<Self::GPR, Self::SIMD>,
476    ) -> Result<(), CompileError>;
477    /// Sub with location directly from the stack
478    fn emit_binop_sub32(
479        &mut self,
480        loc_a: Location<Self::GPR, Self::SIMD>,
481        loc_b: Location<Self::GPR, Self::SIMD>,
482        ret: Location<Self::GPR, Self::SIMD>,
483    ) -> Result<(), CompileError>;
484    /// Multiply with location directly from the stack
485    fn emit_binop_mul32(
486        &mut self,
487        loc_a: Location<Self::GPR, Self::SIMD>,
488        loc_b: Location<Self::GPR, Self::SIMD>,
489        ret: Location<Self::GPR, Self::SIMD>,
490    ) -> Result<(), CompileError>;
491    /// Unsigned Division with location directly from the stack. return the offset of the DIV opcode, to mark as trappable.
492    fn emit_binop_udiv32(
493        &mut self,
494        loc_a: Location<Self::GPR, Self::SIMD>,
495        loc_b: Location<Self::GPR, Self::SIMD>,
496        ret: Location<Self::GPR, Self::SIMD>,
497        integer_division_by_zero: Label,
498        integer_overflow: Label,
499    ) -> Result<usize, CompileError>;
500    /// Signed Division with location directly from the stack. return the offset of the DIV opcode, to mark as trappable.
501    fn emit_binop_sdiv32(
502        &mut self,
503        loc_a: Location<Self::GPR, Self::SIMD>,
504        loc_b: Location<Self::GPR, Self::SIMD>,
505        ret: Location<Self::GPR, Self::SIMD>,
506        integer_division_by_zero: Label,
507        integer_overflow: Label,
508    ) -> Result<usize, CompileError>;
    /// Unsigned remainder (of a division) with location directly from the stack. Returns the offset of the DIV opcode, to mark as trappable.
    fn emit_binop_urem32(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
        integer_division_by_zero: Label,
        integer_overflow: Label,
    ) -> Result<usize, CompileError>;
    /// Signed remainder (of a division) with location directly from the stack. Returns the offset of the DIV opcode, to mark as trappable.
    fn emit_binop_srem32(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
        integer_division_by_zero: Label,
        integer_overflow: Label,
    ) -> Result<usize, CompileError>;
527    /// And with location directly from the stack
528    fn emit_binop_and32(
529        &mut self,
530        loc_a: Location<Self::GPR, Self::SIMD>,
531        loc_b: Location<Self::GPR, Self::SIMD>,
532        ret: Location<Self::GPR, Self::SIMD>,
533    ) -> Result<(), CompileError>;
534    /// Or with location directly from the stack
535    fn emit_binop_or32(
536        &mut self,
537        loc_a: Location<Self::GPR, Self::SIMD>,
538        loc_b: Location<Self::GPR, Self::SIMD>,
539        ret: Location<Self::GPR, Self::SIMD>,
540    ) -> Result<(), CompileError>;
541    /// Xor with location directly from the stack
542    fn emit_binop_xor32(
543        &mut self,
544        loc_a: Location<Self::GPR, Self::SIMD>,
545        loc_b: Location<Self::GPR, Self::SIMD>,
546        ret: Location<Self::GPR, Self::SIMD>,
547    ) -> Result<(), CompileError>;
548    /// Signed Greater of Equal Compare 2 i32, result in a GPR
549    fn i32_cmp_ge_s(
550        &mut self,
551        loc_a: Location<Self::GPR, Self::SIMD>,
552        loc_b: Location<Self::GPR, Self::SIMD>,
553        ret: Location<Self::GPR, Self::SIMD>,
554    ) -> Result<(), CompileError>;
555    /// Signed Greater Than Compare 2 i32, result in a GPR
556    fn i32_cmp_gt_s(
557        &mut self,
558        loc_a: Location<Self::GPR, Self::SIMD>,
559        loc_b: Location<Self::GPR, Self::SIMD>,
560        ret: Location<Self::GPR, Self::SIMD>,
561    ) -> Result<(), CompileError>;
562    /// Signed Less of Equal Compare 2 i32, result in a GPR
563    fn i32_cmp_le_s(
564        &mut self,
565        loc_a: Location<Self::GPR, Self::SIMD>,
566        loc_b: Location<Self::GPR, Self::SIMD>,
567        ret: Location<Self::GPR, Self::SIMD>,
568    ) -> Result<(), CompileError>;
569    /// Signed Less Than Compare 2 i32, result in a GPR
570    fn i32_cmp_lt_s(
571        &mut self,
572        loc_a: Location<Self::GPR, Self::SIMD>,
573        loc_b: Location<Self::GPR, Self::SIMD>,
574        ret: Location<Self::GPR, Self::SIMD>,
575    ) -> Result<(), CompileError>;
576    /// Unsigned Greater of Equal Compare 2 i32, result in a GPR
577    fn i32_cmp_ge_u(
578        &mut self,
579        loc_a: Location<Self::GPR, Self::SIMD>,
580        loc_b: Location<Self::GPR, Self::SIMD>,
581        ret: Location<Self::GPR, Self::SIMD>,
582    ) -> Result<(), CompileError>;
583    /// Unsigned Greater Than Compare 2 i32, result in a GPR
584    fn i32_cmp_gt_u(
585        &mut self,
586        loc_a: Location<Self::GPR, Self::SIMD>,
587        loc_b: Location<Self::GPR, Self::SIMD>,
588        ret: Location<Self::GPR, Self::SIMD>,
589    ) -> Result<(), CompileError>;
590    /// Unsigned Less of Equal Compare 2 i32, result in a GPR
591    fn i32_cmp_le_u(
592        &mut self,
593        loc_a: Location<Self::GPR, Self::SIMD>,
594        loc_b: Location<Self::GPR, Self::SIMD>,
595        ret: Location<Self::GPR, Self::SIMD>,
596    ) -> Result<(), CompileError>;
597    /// Unsigned Less Than Compare 2 i32, result in a GPR
598    fn i32_cmp_lt_u(
599        &mut self,
600        loc_a: Location<Self::GPR, Self::SIMD>,
601        loc_b: Location<Self::GPR, Self::SIMD>,
602        ret: Location<Self::GPR, Self::SIMD>,
603    ) -> Result<(), CompileError>;
604    /// Not Equal Compare 2 i32, result in a GPR
605    fn i32_cmp_ne(
606        &mut self,
607        loc_a: Location<Self::GPR, Self::SIMD>,
608        loc_b: Location<Self::GPR, Self::SIMD>,
609        ret: Location<Self::GPR, Self::SIMD>,
610    ) -> Result<(), CompileError>;
611    /// Equal Compare 2 i32, result in a GPR
612    fn i32_cmp_eq(
613        &mut self,
614        loc_a: Location<Self::GPR, Self::SIMD>,
615        loc_b: Location<Self::GPR, Self::SIMD>,
616        ret: Location<Self::GPR, Self::SIMD>,
617    ) -> Result<(), CompileError>;
618    /// Count Leading 0 bit of an i32
619    fn i32_clz(
620        &mut self,
621        loc: Location<Self::GPR, Self::SIMD>,
622        ret: Location<Self::GPR, Self::SIMD>,
623    ) -> Result<(), CompileError>;
    /// Count trailing 0 bits of an i32
    fn i32_ctz(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
630    /// Count the number of 1 bit of an i32
631    fn i32_popcnt(
632        &mut self,
633        loc: Location<Self::GPR, Self::SIMD>,
634        ret: Location<Self::GPR, Self::SIMD>,
635    ) -> Result<(), CompileError>;
636    /// i32 Logical Shift Left
637    fn i32_shl(
638        &mut self,
639        loc_a: Location<Self::GPR, Self::SIMD>,
640        loc_b: Location<Self::GPR, Self::SIMD>,
641        ret: Location<Self::GPR, Self::SIMD>,
642    ) -> Result<(), CompileError>;
643    /// i32 Logical Shift Right
644    fn i32_shr(
645        &mut self,
646        loc_a: Location<Self::GPR, Self::SIMD>,
647        loc_b: Location<Self::GPR, Self::SIMD>,
648        ret: Location<Self::GPR, Self::SIMD>,
649    ) -> Result<(), CompileError>;
650    /// i32 Arithmetic Shift Right
651    fn i32_sar(
652        &mut self,
653        loc_a: Location<Self::GPR, Self::SIMD>,
654        loc_b: Location<Self::GPR, Self::SIMD>,
655        ret: Location<Self::GPR, Self::SIMD>,
656    ) -> Result<(), CompileError>;
657    /// i32 Roll Left
658    fn i32_rol(
659        &mut self,
660        loc_a: Location<Self::GPR, Self::SIMD>,
661        loc_b: Location<Self::GPR, Self::SIMD>,
662        ret: Location<Self::GPR, Self::SIMD>,
663    ) -> Result<(), CompileError>;
664    /// i32 Roll Right
665    fn i32_ror(
666        &mut self,
667        loc_a: Location<Self::GPR, Self::SIMD>,
668        loc_b: Location<Self::GPR, Self::SIMD>,
669        ret: Location<Self::GPR, Self::SIMD>,
670    ) -> Result<(), CompileError>;
671    /// i32 load
672    #[allow(clippy::too_many_arguments)]
673    fn i32_load(
674        &mut self,
675        addr: Location<Self::GPR, Self::SIMD>,
676        memarg: &MemArg,
677        ret: Location<Self::GPR, Self::SIMD>,
678        need_check: bool,
679        imported_memories: bool,
680        offset: i32,
681        heap_access_oob: Label,
682        unaligned_atomic: Label,
683    ) -> Result<(), CompileError>;
684    /// i32 load of an unsigned 8bits
685    #[allow(clippy::too_many_arguments)]
686    fn i32_load_8u(
687        &mut self,
688        addr: Location<Self::GPR, Self::SIMD>,
689        memarg: &MemArg,
690        ret: Location<Self::GPR, Self::SIMD>,
691        need_check: bool,
692        imported_memories: bool,
693        offset: i32,
694        heap_access_oob: Label,
695        unaligned_atomic: Label,
696    ) -> Result<(), CompileError>;
697    /// i32 load of an signed 8bits
698    #[allow(clippy::too_many_arguments)]
699    fn i32_load_8s(
700        &mut self,
701        addr: Location<Self::GPR, Self::SIMD>,
702        memarg: &MemArg,
703        ret: Location<Self::GPR, Self::SIMD>,
704        need_check: bool,
705        imported_memories: bool,
706        offset: i32,
707        heap_access_oob: Label,
708        unaligned_atomic: Label,
709    ) -> Result<(), CompileError>;
710    /// i32 load of an unsigned 16bits
711    #[allow(clippy::too_many_arguments)]
712    fn i32_load_16u(
713        &mut self,
714        addr: Location<Self::GPR, Self::SIMD>,
715        memarg: &MemArg,
716        ret: Location<Self::GPR, Self::SIMD>,
717        need_check: bool,
718        imported_memories: bool,
719        offset: i32,
720        heap_access_oob: Label,
721        unaligned_atomic: Label,
722    ) -> Result<(), CompileError>;
723    /// i32 load of an signed 16bits
724    #[allow(clippy::too_many_arguments)]
725    fn i32_load_16s(
726        &mut self,
727        addr: Location<Self::GPR, Self::SIMD>,
728        memarg: &MemArg,
729        ret: Location<Self::GPR, Self::SIMD>,
730        need_check: bool,
731        imported_memories: bool,
732        offset: i32,
733        heap_access_oob: Label,
734        unaligned_atomic: Label,
735    ) -> Result<(), CompileError>;
736    /// i32 atomic load
737    #[allow(clippy::too_many_arguments)]
738    fn i32_atomic_load(
739        &mut self,
740        addr: Location<Self::GPR, Self::SIMD>,
741        memarg: &MemArg,
742        ret: Location<Self::GPR, Self::SIMD>,
743        need_check: bool,
744        imported_memories: bool,
745        offset: i32,
746        heap_access_oob: Label,
747        unaligned_atomic: Label,
748    ) -> Result<(), CompileError>;
749    /// i32 atomic load of an unsigned 8bits
750    #[allow(clippy::too_many_arguments)]
751    fn i32_atomic_load_8u(
752        &mut self,
753        addr: Location<Self::GPR, Self::SIMD>,
754        memarg: &MemArg,
755        ret: Location<Self::GPR, Self::SIMD>,
756        need_check: bool,
757        imported_memories: bool,
758        offset: i32,
759        heap_access_oob: Label,
760        unaligned_atomic: Label,
761    ) -> Result<(), CompileError>;
762    /// i32 atomic load of an unsigned 16bits
763    #[allow(clippy::too_many_arguments)]
764    fn i32_atomic_load_16u(
765        &mut self,
766        addr: Location<Self::GPR, Self::SIMD>,
767        memarg: &MemArg,
768        ret: Location<Self::GPR, Self::SIMD>,
769        need_check: bool,
770        imported_memories: bool,
771        offset: i32,
772        heap_access_oob: Label,
773        unaligned_atomic: Label,
774    ) -> Result<(), CompileError>;
    /// i32 save (store a full 32 bits value to memory)
    #[allow(clippy::too_many_arguments)]
    fn i32_save(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 save of the lower 8 bits
    #[allow(clippy::too_many_arguments)]
    fn i32_save_8(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 save of the lower 16 bits
    #[allow(clippy::too_many_arguments)]
    fn i32_save_16(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic save
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_save(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic save of the lower 8 bits
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_save_8(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic save of the lower 16 bits
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_save_16(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Add with i32
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_add(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Add on an unsigned 8 bits memory location
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_add_8u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Add on an unsigned 16 bits memory location
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_add_16u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Sub with i32
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_sub(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Sub on an unsigned 8 bits memory location
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_sub_8u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Sub on an unsigned 16 bits memory location
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_sub_16u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic And with i32
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_and(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic And on an unsigned 8 bits memory location
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_and_8u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic And on an unsigned 16 bits memory location
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_and_16u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Or with i32
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_or(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Or on an unsigned 8 bits memory location
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_or_8u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Or on an unsigned 16 bits memory location
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_or_16u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Xor with i32
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_xor(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Xor on an unsigned 8 bits memory location
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_xor_8u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Xor on an unsigned 16 bits memory location
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_xor_16u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Exchange with i32
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_xchg(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Exchange with an unsigned 8 bits memory location
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_xchg_8u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Exchange with an unsigned 16 bits memory location
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_xchg_16u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Compare and Exchange with i32:
    /// `new` is the replacement value, `cmp` is the expected value at `target`
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_cmpxchg(
        &mut self,
        new: Location<Self::GPR, Self::SIMD>,
        cmp: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Compare and Exchange on an unsigned 8 bits memory location
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_cmpxchg_8u(
        &mut self,
        new: Location<Self::GPR, Self::SIMD>,
        cmp: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Compare and Exchange on an unsigned 16 bits memory location
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_cmpxchg_16u(
        &mut self,
        new: Location<Self::GPR, Self::SIMD>,
        cmp: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
1150
    /// Emit a move of a function address to a GPR ready for call, using the
    /// appropriate relocation kind; returns the relocations to be recorded
    fn emit_call_with_reloc(
        &mut self,
        calling_convention: CallingConvention,
        reloc_target: RelocationTarget,
    ) -> Result<Vec<Relocation>, CompileError>;
    /// Add with location directly from the stack
    fn emit_binop_add64(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Sub with location directly from the stack
    fn emit_binop_sub64(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Multiply with location directly from the stack
    fn emit_binop_mul64(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Unsigned Division with location directly from the stack. Returns the offset of the DIV opcode, to mark as trappable.
    fn emit_binop_udiv64(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
        integer_division_by_zero: Label,
        integer_overflow: Label,
    ) -> Result<usize, CompileError>;
    /// Signed Division with location directly from the stack. Returns the offset of the DIV opcode, to mark as trappable.
    fn emit_binop_sdiv64(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
        integer_division_by_zero: Label,
        integer_overflow: Label,
    ) -> Result<usize, CompileError>;
    /// Unsigned Remainder (of a division) with location directly from the stack. Returns the offset of the DIV opcode, to mark as trappable.
    fn emit_binop_urem64(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
        integer_division_by_zero: Label,
        integer_overflow: Label,
    ) -> Result<usize, CompileError>;
    /// Signed Remainder (of a Division) with location directly from the stack. Returns the offset of the DIV opcode, to mark as trappable.
    fn emit_binop_srem64(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
        integer_division_by_zero: Label,
        integer_overflow: Label,
    ) -> Result<usize, CompileError>;
    /// And with location directly from the stack
    fn emit_binop_and64(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Or with location directly from the stack
    fn emit_binop_or64(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Xor with location directly from the stack
    fn emit_binop_xor64(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Signed Greater or Equal Compare of 2 i64, result in a GPR
    fn i64_cmp_ge_s(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Signed Greater Than Compare of 2 i64, result in a GPR
    fn i64_cmp_gt_s(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Signed Less or Equal Compare of 2 i64, result in a GPR
    fn i64_cmp_le_s(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Signed Less Than Compare of 2 i64, result in a GPR
    fn i64_cmp_lt_s(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Unsigned Greater or Equal Compare of 2 i64, result in a GPR
    fn i64_cmp_ge_u(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Unsigned Greater Than Compare of 2 i64, result in a GPR
    fn i64_cmp_gt_u(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Unsigned Less or Equal Compare of 2 i64, result in a GPR
    fn i64_cmp_le_u(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Unsigned Less Than Compare of 2 i64, result in a GPR
    fn i64_cmp_lt_u(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Not Equal Compare of 2 i64, result in a GPR
    fn i64_cmp_ne(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Equal Compare of 2 i64, result in a GPR
    fn i64_cmp_eq(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Count Leading zero bits of an i64
    fn i64_clz(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Count Trailing zero bits of an i64
    fn i64_ctz(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Count the number of 1 bits of an i64 (population count)
    fn i64_popcnt(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// i64 Logical Shift Left
    fn i64_shl(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// i64 Logical Shift Right
    fn i64_shr(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// i64 Arithmetic Shift Right
    fn i64_sar(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// i64 Rotate Left
    fn i64_rol(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// i64 Rotate Right
    fn i64_ror(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// i64 load
    #[allow(clippy::too_many_arguments)]
    fn i64_load(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 load of an unsigned 8 bits value (zero-extended)
    #[allow(clippy::too_many_arguments)]
    fn i64_load_8u(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 load of a signed 8 bits value (sign-extended)
    #[allow(clippy::too_many_arguments)]
    fn i64_load_8s(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 load of an unsigned 32 bits value (zero-extended)
    #[allow(clippy::too_many_arguments)]
    fn i64_load_32u(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 load of a signed 32 bits value (sign-extended)
    #[allow(clippy::too_many_arguments)]
    fn i64_load_32s(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 load of an unsigned 16 bits value (zero-extended)
    #[allow(clippy::too_many_arguments)]
    fn i64_load_16u(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 load of a signed 16 bits value (sign-extended)
    #[allow(clippy::too_many_arguments)]
    fn i64_load_16s(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic load
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_load(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic load of an unsigned 8 bits value (zero-extended)
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_load_8u(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic load of an unsigned 16 bits value (zero-extended)
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_load_16u(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic load of an unsigned 32 bits value (zero-extended)
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_load_32u(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 save (store a full 64 bits value to memory)
    #[allow(clippy::too_many_arguments)]
    fn i64_save(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 save of the lower 8 bits
    #[allow(clippy::too_many_arguments)]
    fn i64_save_8(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 save of the lower 16 bits
    #[allow(clippy::too_many_arguments)]
    fn i64_save_16(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 save of the lower 32 bits
    #[allow(clippy::too_many_arguments)]
    fn i64_save_32(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic save
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_save(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic save of the lower 8 bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_save_8(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic save of the lower 16 bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_save_16(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic save of the lower 32 bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_save_32(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic Add with i64
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_add(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic Add on an unsigned 8 bits memory location
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_add_8u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic Add on an unsigned 16 bits memory location
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_add_16u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic Add on an unsigned 32 bits memory location
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_add_32u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
1661    /// i64 atomic Sub with i64
1662    #[allow(clippy::too_many_arguments)]
1663    fn i64_atomic_sub(
1664        &mut self,
1665        loc: Location<Self::GPR, Self::SIMD>,
1666        target: Location<Self::GPR, Self::SIMD>,
1667        memarg: &MemArg,
1668        ret: Location<Self::GPR, Self::SIMD>,
1669        need_check: bool,
1670        imported_memories: bool,
1671        offset: i32,
1672        heap_access_oob: Label,
1673        unaligned_atomic: Label,
1674    ) -> Result<(), CompileError>;
1675    /// i64 atomic Sub with unsigned 8bits
1676    #[allow(clippy::too_many_arguments)]
1677    fn i64_atomic_sub_8u(
1678        &mut self,
1679        loc: Location<Self::GPR, Self::SIMD>,
1680        target: Location<Self::GPR, Self::SIMD>,
1681        memarg: &MemArg,
1682        ret: Location<Self::GPR, Self::SIMD>,
1683        need_check: bool,
1684        imported_memories: bool,
1685        offset: i32,
1686        heap_access_oob: Label,
1687        unaligned_atomic: Label,
1688    ) -> Result<(), CompileError>;
1689    /// i64 atomic Sub with unsigned 16bits
1690    #[allow(clippy::too_many_arguments)]
1691    fn i64_atomic_sub_16u(
1692        &mut self,
1693        loc: Location<Self::GPR, Self::SIMD>,
1694        target: Location<Self::GPR, Self::SIMD>,
1695        memarg: &MemArg,
1696        ret: Location<Self::GPR, Self::SIMD>,
1697        need_check: bool,
1698        imported_memories: bool,
1699        offset: i32,
1700        heap_access_oob: Label,
1701        unaligned_atomic: Label,
1702    ) -> Result<(), CompileError>;
1703    /// i64 atomic Sub with unsigned 32bits
1704    #[allow(clippy::too_many_arguments)]
1705    fn i64_atomic_sub_32u(
1706        &mut self,
1707        loc: Location<Self::GPR, Self::SIMD>,
1708        target: Location<Self::GPR, Self::SIMD>,
1709        memarg: &MemArg,
1710        ret: Location<Self::GPR, Self::SIMD>,
1711        need_check: bool,
1712        imported_memories: bool,
1713        offset: i32,
1714        heap_access_oob: Label,
1715        unaligned_atomic: Label,
1716    ) -> Result<(), CompileError>;
1717    /// i64 atomic And with i64
1718    #[allow(clippy::too_many_arguments)]
1719    fn i64_atomic_and(
1720        &mut self,
1721        loc: Location<Self::GPR, Self::SIMD>,
1722        target: Location<Self::GPR, Self::SIMD>,
1723        memarg: &MemArg,
1724        ret: Location<Self::GPR, Self::SIMD>,
1725        need_check: bool,
1726        imported_memories: bool,
1727        offset: i32,
1728        heap_access_oob: Label,
1729        unaligned_atomic: Label,
1730    ) -> Result<(), CompileError>;
1731    /// i64 atomic And with unsigned 8bits
1732    #[allow(clippy::too_many_arguments)]
1733    fn i64_atomic_and_8u(
1734        &mut self,
1735        loc: Location<Self::GPR, Self::SIMD>,
1736        target: Location<Self::GPR, Self::SIMD>,
1737        memarg: &MemArg,
1738        ret: Location<Self::GPR, Self::SIMD>,
1739        need_check: bool,
1740        imported_memories: bool,
1741        offset: i32,
1742        heap_access_oob: Label,
1743        unaligned_atomic: Label,
1744    ) -> Result<(), CompileError>;
1745    /// i64 atomic And with unsigned 16bits
1746    #[allow(clippy::too_many_arguments)]
1747    fn i64_atomic_and_16u(
1748        &mut self,
1749        loc: Location<Self::GPR, Self::SIMD>,
1750        target: Location<Self::GPR, Self::SIMD>,
1751        memarg: &MemArg,
1752        ret: Location<Self::GPR, Self::SIMD>,
1753        need_check: bool,
1754        imported_memories: bool,
1755        offset: i32,
1756        heap_access_oob: Label,
1757        unaligned_atomic: Label,
1758    ) -> Result<(), CompileError>;
1759    /// i64 atomic And with unsigned 32bits
1760    #[allow(clippy::too_many_arguments)]
1761    fn i64_atomic_and_32u(
1762        &mut self,
1763        loc: Location<Self::GPR, Self::SIMD>,
1764        target: Location<Self::GPR, Self::SIMD>,
1765        memarg: &MemArg,
1766        ret: Location<Self::GPR, Self::SIMD>,
1767        need_check: bool,
1768        imported_memories: bool,
1769        offset: i32,
1770        heap_access_oob: Label,
1771        unaligned_atomic: Label,
1772    ) -> Result<(), CompileError>;
1773    /// i64 atomic Or with i64
1774    #[allow(clippy::too_many_arguments)]
1775    fn i64_atomic_or(
1776        &mut self,
1777        loc: Location<Self::GPR, Self::SIMD>,
1778        target: Location<Self::GPR, Self::SIMD>,
1779        memarg: &MemArg,
1780        ret: Location<Self::GPR, Self::SIMD>,
1781        need_check: bool,
1782        imported_memories: bool,
1783        offset: i32,
1784        heap_access_oob: Label,
1785        unaligned_atomic: Label,
1786    ) -> Result<(), CompileError>;
1787    /// i64 atomic Or with unsigned 8bits
1788    #[allow(clippy::too_many_arguments)]
1789    fn i64_atomic_or_8u(
1790        &mut self,
1791        loc: Location<Self::GPR, Self::SIMD>,
1792        target: Location<Self::GPR, Self::SIMD>,
1793        memarg: &MemArg,
1794        ret: Location<Self::GPR, Self::SIMD>,
1795        need_check: bool,
1796        imported_memories: bool,
1797        offset: i32,
1798        heap_access_oob: Label,
1799        unaligned_atomic: Label,
1800    ) -> Result<(), CompileError>;
1801    /// i64 atomic Or with unsigned 16bits
1802    #[allow(clippy::too_many_arguments)]
1803    fn i64_atomic_or_16u(
1804        &mut self,
1805        loc: Location<Self::GPR, Self::SIMD>,
1806        target: Location<Self::GPR, Self::SIMD>,
1807        memarg: &MemArg,
1808        ret: Location<Self::GPR, Self::SIMD>,
1809        need_check: bool,
1810        imported_memories: bool,
1811        offset: i32,
1812        heap_access_oob: Label,
1813        unaligned_atomic: Label,
1814    ) -> Result<(), CompileError>;
1815    /// i64 atomic Or with unsigned 32bits
1816    #[allow(clippy::too_many_arguments)]
1817    fn i64_atomic_or_32u(
1818        &mut self,
1819        loc: Location<Self::GPR, Self::SIMD>,
1820        target: Location<Self::GPR, Self::SIMD>,
1821        memarg: &MemArg,
1822        ret: Location<Self::GPR, Self::SIMD>,
1823        need_check: bool,
1824        imported_memories: bool,
1825        offset: i32,
1826        heap_access_oob: Label,
1827        unaligned_atomic: Label,
1828    ) -> Result<(), CompileError>;
1829    /// i64 atomic Xor with i64
1830    #[allow(clippy::too_many_arguments)]
1831    fn i64_atomic_xor(
1832        &mut self,
1833        loc: Location<Self::GPR, Self::SIMD>,
1834        target: Location<Self::GPR, Self::SIMD>,
1835        memarg: &MemArg,
1836        ret: Location<Self::GPR, Self::SIMD>,
1837        need_check: bool,
1838        imported_memories: bool,
1839        offset: i32,
1840        heap_access_oob: Label,
1841        unaligned_atomic: Label,
1842    ) -> Result<(), CompileError>;
1843    /// i64 atomic Xor with unsigned 8bits
1844    #[allow(clippy::too_many_arguments)]
1845    fn i64_atomic_xor_8u(
1846        &mut self,
1847        loc: Location<Self::GPR, Self::SIMD>,
1848        target: Location<Self::GPR, Self::SIMD>,
1849        memarg: &MemArg,
1850        ret: Location<Self::GPR, Self::SIMD>,
1851        need_check: bool,
1852        imported_memories: bool,
1853        offset: i32,
1854        heap_access_oob: Label,
1855        unaligned_atomic: Label,
1856    ) -> Result<(), CompileError>;
1857    /// i64 atomic Xor with unsigned 16bits
1858    #[allow(clippy::too_many_arguments)]
1859    fn i64_atomic_xor_16u(
1860        &mut self,
1861        loc: Location<Self::GPR, Self::SIMD>,
1862        target: Location<Self::GPR, Self::SIMD>,
1863        memarg: &MemArg,
1864        ret: Location<Self::GPR, Self::SIMD>,
1865        need_check: bool,
1866        imported_memories: bool,
1867        offset: i32,
1868        heap_access_oob: Label,
1869        unaligned_atomic: Label,
1870    ) -> Result<(), CompileError>;
1871    /// i64 atomic Xor with unsigned 32bits
1872    #[allow(clippy::too_many_arguments)]
1873    fn i64_atomic_xor_32u(
1874        &mut self,
1875        loc: Location<Self::GPR, Self::SIMD>,
1876        target: Location<Self::GPR, Self::SIMD>,
1877        memarg: &MemArg,
1878        ret: Location<Self::GPR, Self::SIMD>,
1879        need_check: bool,
1880        imported_memories: bool,
1881        offset: i32,
1882        heap_access_oob: Label,
1883        unaligned_atomic: Label,
1884    ) -> Result<(), CompileError>;
1885    /// i64 atomic Exchange with i64
1886    #[allow(clippy::too_many_arguments)]
1887    fn i64_atomic_xchg(
1888        &mut self,
1889        loc: Location<Self::GPR, Self::SIMD>,
1890        target: Location<Self::GPR, Self::SIMD>,
1891        memarg: &MemArg,
1892        ret: Location<Self::GPR, Self::SIMD>,
1893        need_check: bool,
1894        imported_memories: bool,
1895        offset: i32,
1896        heap_access_oob: Label,
1897        unaligned_atomic: Label,
1898    ) -> Result<(), CompileError>;
1899    /// i64 atomic Exchange with u8
1900    #[allow(clippy::too_many_arguments)]
1901    fn i64_atomic_xchg_8u(
1902        &mut self,
1903        loc: Location<Self::GPR, Self::SIMD>,
1904        target: Location<Self::GPR, Self::SIMD>,
1905        memarg: &MemArg,
1906        ret: Location<Self::GPR, Self::SIMD>,
1907        need_check: bool,
1908        imported_memories: bool,
1909        offset: i32,
1910        heap_access_oob: Label,
1911        unaligned_atomic: Label,
1912    ) -> Result<(), CompileError>;
1913    /// i64 atomic Exchange with u16
1914    #[allow(clippy::too_many_arguments)]
1915    fn i64_atomic_xchg_16u(
1916        &mut self,
1917        loc: Location<Self::GPR, Self::SIMD>,
1918        target: Location<Self::GPR, Self::SIMD>,
1919        memarg: &MemArg,
1920        ret: Location<Self::GPR, Self::SIMD>,
1921        need_check: bool,
1922        imported_memories: bool,
1923        offset: i32,
1924        heap_access_oob: Label,
1925        unaligned_atomic: Label,
1926    ) -> Result<(), CompileError>;
1927    /// i64 atomic Exchange with u32
1928    #[allow(clippy::too_many_arguments)]
1929    fn i64_atomic_xchg_32u(
1930        &mut self,
1931        loc: Location<Self::GPR, Self::SIMD>,
1932        target: Location<Self::GPR, Self::SIMD>,
1933        memarg: &MemArg,
1934        ret: Location<Self::GPR, Self::SIMD>,
1935        need_check: bool,
1936        imported_memories: bool,
1937        offset: i32,
1938        heap_access_oob: Label,
1939        unaligned_atomic: Label,
1940    ) -> Result<(), CompileError>;
1941    /// i64 atomic Compare and Exchange with i32
1942    #[allow(clippy::too_many_arguments)]
1943    fn i64_atomic_cmpxchg(
1944        &mut self,
1945        new: Location<Self::GPR, Self::SIMD>,
1946        cmp: Location<Self::GPR, Self::SIMD>,
1947        target: Location<Self::GPR, Self::SIMD>,
1948        memarg: &MemArg,
1949        ret: Location<Self::GPR, Self::SIMD>,
1950        need_check: bool,
1951        imported_memories: bool,
1952        offset: i32,
1953        heap_access_oob: Label,
1954        unaligned_atomic: Label,
1955    ) -> Result<(), CompileError>;
1956    /// i64 atomic Compare and Exchange with u8
1957    #[allow(clippy::too_many_arguments)]
1958    fn i64_atomic_cmpxchg_8u(
1959        &mut self,
1960        new: Location<Self::GPR, Self::SIMD>,
1961        cmp: Location<Self::GPR, Self::SIMD>,
1962        target: Location<Self::GPR, Self::SIMD>,
1963        memarg: &MemArg,
1964        ret: Location<Self::GPR, Self::SIMD>,
1965        need_check: bool,
1966        imported_memories: bool,
1967        offset: i32,
1968        heap_access_oob: Label,
1969        unaligned_atomic: Label,
1970    ) -> Result<(), CompileError>;
1971    /// i64 atomic Compare and Exchange with u16
1972    #[allow(clippy::too_many_arguments)]
1973    fn i64_atomic_cmpxchg_16u(
1974        &mut self,
1975        new: Location<Self::GPR, Self::SIMD>,
1976        cmp: Location<Self::GPR, Self::SIMD>,
1977        target: Location<Self::GPR, Self::SIMD>,
1978        memarg: &MemArg,
1979        ret: Location<Self::GPR, Self::SIMD>,
1980        need_check: bool,
1981        imported_memories: bool,
1982        offset: i32,
1983        heap_access_oob: Label,
1984        unaligned_atomic: Label,
1985    ) -> Result<(), CompileError>;
1986    /// i64 atomic Compare and Exchange with u32
1987    #[allow(clippy::too_many_arguments)]
1988    fn i64_atomic_cmpxchg_32u(
1989        &mut self,
1990        new: Location<Self::GPR, Self::SIMD>,
1991        cmp: Location<Self::GPR, Self::SIMD>,
1992        target: Location<Self::GPR, Self::SIMD>,
1993        memarg: &MemArg,
1994        ret: Location<Self::GPR, Self::SIMD>,
1995        need_check: bool,
1996        imported_memories: bool,
1997        offset: i32,
1998        heap_access_oob: Label,
1999        unaligned_atomic: Label,
2000    ) -> Result<(), CompileError>;
2001
    /// Load an F32 from memory into `ret`
    #[allow(clippy::too_many_arguments)]
    fn f32_load(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// Save an F32 to memory, optionally canonicalizing NaNs first
    #[allow(clippy::too_many_arguments)]
    fn f32_save(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        canonicalize: bool,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// Load an F64 from memory into `ret`
    #[allow(clippy::too_many_arguments)]
    fn f64_load(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// Save an F64 to memory, optionally canonicalizing NaNs first
    #[allow(clippy::too_many_arguments)]
    fn f64_save(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        canonicalize: bool,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// Convert a F64 from I64, signed or unsigned
    fn convert_f64_i64(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        signed: bool,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Convert a F64 from I32, signed or unsigned
    fn convert_f64_i32(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        signed: bool,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Convert a F32 from I64, signed or unsigned
    fn convert_f32_i64(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        signed: bool,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Convert a F32 from I32, signed or unsigned
    fn convert_f32_i32(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        signed: bool,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Convert a F64 to I64, signed or unsigned, with or without saturation
    fn convert_i64_f64(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
        signed: bool,
        sat: bool,
    ) -> Result<(), CompileError>;
    /// Convert a F64 to I32, signed or unsigned, with or without saturation
    fn convert_i32_f64(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
        signed: bool,
        sat: bool,
    ) -> Result<(), CompileError>;
    /// Convert a F32 to I64, signed or unsigned, with or without saturation
    fn convert_i64_f32(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
        signed: bool,
        sat: bool,
    ) -> Result<(), CompileError>;
    /// Convert a F32 to I32, signed or unsigned, with or without saturation
    fn convert_i32_f32(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
        signed: bool,
        sat: bool,
    ) -> Result<(), CompileError>;
    /// Convert a F32 to F64 (promotion)
    fn convert_f64_f32(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Convert a F64 to F32 (demotion)
    fn convert_f32_f64(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Negate an F64
    fn f64_neg(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Get the Absolute Value of an F64
    fn f64_abs(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Copy sign from tmp1 Self::GPR to tmp2 Self::GPR
    fn emit_i64_copysign(&mut self, tmp1: Self::GPR, tmp2: Self::GPR) -> Result<(), CompileError>;
    /// Get the Square Root of an F64
    fn f64_sqrt(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Trunc of an F64
    fn f64_trunc(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Ceil of an F64
    fn f64_ceil(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Floor of an F64
    fn f64_floor(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Round at nearest int of an F64
    fn f64_nearest(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Greater or Equal Compare 2 F64, result in a GPR
    fn f64_cmp_ge(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Greater Than Compare 2 F64, result in a GPR
    fn f64_cmp_gt(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Less or Equal Compare 2 F64, result in a GPR
    fn f64_cmp_le(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Less Than Compare 2 F64, result in a GPR
    fn f64_cmp_lt(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Not Equal Compare 2 F64, result in a GPR
    fn f64_cmp_ne(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Equal Compare 2 F64, result in a GPR
    fn f64_cmp_eq(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// get Min for 2 F64 values
    fn f64_min(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// get Max for 2 F64 values
    fn f64_max(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Add 2 F64 values
    fn f64_add(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Sub 2 F64 values
    fn f64_sub(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Multiply 2 F64 values
    fn f64_mul(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Divide 2 F64 values
    fn f64_div(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Negate an F32
    fn f32_neg(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Get the Absolute Value of an F32
    fn f32_abs(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Copy sign from tmp1 Self::GPR to tmp2 Self::GPR
    fn emit_i32_copysign(&mut self, tmp1: Self::GPR, tmp2: Self::GPR) -> Result<(), CompileError>;
    /// Get the Square Root of an F32
    fn f32_sqrt(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Trunc of an F32
    fn f32_trunc(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Ceil of an F32
    fn f32_ceil(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Floor of an F32
    fn f32_floor(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Round at nearest int of an F32
    fn f32_nearest(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Greater or Equal Compare 2 F32, result in a GPR
    fn f32_cmp_ge(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Greater Than Compare 2 F32, result in a GPR
    fn f32_cmp_gt(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Less or Equal Compare 2 F32, result in a GPR
    fn f32_cmp_le(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Less Than Compare 2 F32, result in a GPR
    fn f32_cmp_lt(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Not Equal Compare 2 F32, result in a GPR
    fn f32_cmp_ne(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Equal Compare 2 F32, result in a GPR
    fn f32_cmp_eq(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// get Min for 2 F32 values
    fn f32_min(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// get Max for 2 F32 values
    fn f32_max(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Add 2 F32 values
    fn f32_add(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Sub 2 F32 values
    fn f32_sub(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Multiply 2 F32 values
    fn f32_mul(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Divide 2 F32 values
    fn f32_div(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
2384
    /// Standard function Trampoline generation
    fn gen_std_trampoline(
        &self,
        sig: &FunctionType,
        calling_convention: CallingConvention,
    ) -> Result<FunctionBody, CompileError>;
    /// Generates dynamic import function call trampoline for a function type.
    fn gen_std_dynamic_import_trampoline(
        &self,
        vmoffsets: &VMOffsets,
        sig: &FunctionType,
        calling_convention: CallingConvention,
    ) -> Result<FunctionBody, CompileError>;
    /// Singlepass calls import functions through a trampoline.
    fn gen_import_call_trampoline(
        &self,
        vmoffsets: &VMOffsets,
        index: FunctionIndex,
        sig: &FunctionType,
        calling_convention: CallingConvention,
    ) -> Result<CustomSection, CompileError>;
    /// Generate eh_frame unwind instructions (or None if not possible / supported)
    fn gen_dwarf_unwind_info(&mut self, code_len: usize) -> Option<UnwindInstructions>;
    /// Generate Windows unwind instructions (or None if not possible / supported)
    fn gen_windows_unwind_info(&mut self, code_len: usize) -> Option<Vec<u8>>;
2410}
2411
2412/// Standard entry trampoline generation
2413pub fn gen_std_trampoline(
2414    sig: &FunctionType,
2415    target: &Target,
2416    calling_convention: CallingConvention,
2417) -> Result<FunctionBody, CompileError> {
2418    match target.triple().architecture {
2419        Architecture::X86_64 => {
2420            let machine = MachineX86_64::new(Some(target.clone()))?;
2421            machine.gen_std_trampoline(sig, calling_convention)
2422        }
2423        Architecture::Aarch64(_) => {
2424            let machine = MachineARM64::new(Some(target.clone()));
2425            machine.gen_std_trampoline(sig, calling_convention)
2426        }
2427        _ => Err(CompileError::UnsupportedTarget(
2428            "singlepass unimplemented arch for gen_std_trampoline".to_owned(),
2429        )),
2430    }
2431}
2432
2433/// Generates dynamic import function call trampoline for a function type.
2434pub fn gen_std_dynamic_import_trampoline(
2435    vmoffsets: &VMOffsets,
2436    sig: &FunctionType,
2437    target: &Target,
2438    calling_convention: CallingConvention,
2439) -> Result<FunctionBody, CompileError> {
2440    match target.triple().architecture {
2441        Architecture::X86_64 => {
2442            let machine = MachineX86_64::new(Some(target.clone()))?;
2443            machine.gen_std_dynamic_import_trampoline(vmoffsets, sig, calling_convention)
2444        }
2445        Architecture::Aarch64(_) => {
2446            let machine = MachineARM64::new(Some(target.clone()));
2447            machine.gen_std_dynamic_import_trampoline(vmoffsets, sig, calling_convention)
2448        }
2449        _ => Err(CompileError::UnsupportedTarget(
2450            "singlepass unimplemented arch for gen_std_dynamic_import_trampoline".to_owned(),
2451        )),
2452    }
2453}
2454/// Singlepass calls import functions through a trampoline.
2455pub fn gen_import_call_trampoline(
2456    vmoffsets: &VMOffsets,
2457    index: FunctionIndex,
2458    sig: &FunctionType,
2459    target: &Target,
2460    calling_convention: CallingConvention,
2461) -> Result<CustomSection, CompileError> {
2462    match target.triple().architecture {
2463        Architecture::X86_64 => {
2464            let machine = MachineX86_64::new(Some(target.clone()))?;
2465            machine.gen_import_call_trampoline(vmoffsets, index, sig, calling_convention)
2466        }
2467        Architecture::Aarch64(_) => {
2468            let machine = MachineARM64::new(Some(target.clone()));
2469            machine.gen_import_call_trampoline(vmoffsets, index, sig, calling_convention)
2470        }
2471        _ => Err(CompileError::UnsupportedTarget(
2472            "singlepass unimplemented arch for gen_import_call_trampoline".to_owned(),
2473        )),
2474    }
2475}
2476
// Constants for the bounds of truncation operations. These are the least or
// greatest exact floats in either f32 or f64 representation less-than (for
// least) or greater-than (for greatest) the i32 or i64 or u32 or u64
// min (for least) or max (for greatest), when rounding towards zero.
//
// An input strictly between a GE*_LT_*_MIN / LE*_GT_*_MAX pair truncates to
// an in-range integer; an input at or beyond either bound is out of range.

/// Greatest Exact Float (32 bits) less-than i32::MIN when rounding towards zero.
/// Equals -(2^31 + 256); the f32 ulp at this magnitude is 2^(31-23) = 256.
pub const GEF32_LT_I32_MIN: f32 = -2147483904.0;
/// Least Exact Float (32 bits) greater-than i32::MAX when rounding towards zero.
/// Equals 2^31 exactly (representable in f32, while i32::MAX itself is not).
pub const LEF32_GT_I32_MAX: f32 = 2147483648.0;
/// Greatest Exact Float (32 bits) less-than i64::MIN when rounding towards zero.
/// Equals -(2^63 + 2^40); the f32 ulp at this magnitude is 2^(63-23) = 2^40.
pub const GEF32_LT_I64_MIN: f32 = -9223373136366403584.0;
/// Least Exact Float (32 bits) greater-than i64::MAX when rounding towards zero.
/// Equals 2^63 exactly.
pub const LEF32_GT_I64_MAX: f32 = 9223372036854775808.0;
/// Greatest Exact Float (32 bits) less-than u32::MIN when rounding towards zero.
/// -1.0 works because any float in (-1.0, 0.0) truncates to 0 == u32::MIN.
pub const GEF32_LT_U32_MIN: f32 = -1.0;
/// Least Exact Float (32 bits) greater-than u32::MAX when rounding towards zero.
/// Equals 2^32 exactly.
pub const LEF32_GT_U32_MAX: f32 = 4294967296.0;
/// Greatest Exact Float (32 bits) less-than u64::MIN when rounding towards zero.
/// Same reasoning as `GEF32_LT_U32_MIN`: (-1.0, 0.0) truncates to 0.
pub const GEF32_LT_U64_MIN: f32 = -1.0;
/// Least Exact Float (32 bits) greater-than u64::MAX when rounding towards zero.
/// Equals 2^64 exactly.
pub const LEF32_GT_U64_MAX: f32 = 18446744073709551616.0;

/// Greatest Exact Float (64 bits) less-than i32::MIN when rounding towards zero.
/// Equals -(2^31 + 1), exactly representable in f64.
pub const GEF64_LT_I32_MIN: f64 = -2147483649.0;
/// Least Exact Float (64 bits) greater-than i32::MAX when rounding towards zero.
/// Equals 2^31 exactly.
pub const LEF64_GT_I32_MAX: f64 = 2147483648.0;
/// Greatest Exact Float (64 bits) less-than i64::MIN when rounding towards zero.
/// Equals -(2^63 + 2^11); the f64 ulp at this magnitude is 2^(63-52) = 2048.
pub const GEF64_LT_I64_MIN: f64 = -9223372036854777856.0;
/// Least Exact Float (64 bits) greater-than i64::MAX when rounding towards zero.
/// Equals 2^63 exactly.
pub const LEF64_GT_I64_MAX: f64 = 9223372036854775808.0;
/// Greatest Exact Float (64 bits) less-than u32::MIN when rounding towards zero.
/// -1.0 works because any float in (-1.0, 0.0) truncates to 0 == u32::MIN.
pub const GEF64_LT_U32_MIN: f64 = -1.0;
/// Least Exact Float (64 bits) greater-than u32::MAX when rounding towards zero.
/// Equals 2^32 exactly.
pub const LEF64_GT_U32_MAX: f64 = 4294967296.0;
/// Greatest Exact Float (64 bits) less-than u64::MIN when rounding towards zero.
/// Same reasoning as `GEF64_LT_U32_MIN`: (-1.0, 0.0) truncates to 0.
pub const GEF64_LT_U64_MIN: f64 = -1.0;
/// Least Exact Float (64 bits) greater-than u64::MAX when rounding towards zero.
/// Equals 2^64 exactly.
pub const LEF64_GT_U64_MAX: f64 = 18446744073709551616.0;