#![allow(clippy::upper_case_acronyms)]

use crate::location::CombinedRegister;
use crate::location::Reg as AbstractReg;
use std::slice::Iter;
use wasmer_types::{CompileError, Type, target::CallingConvention};
8
/// General-purpose x86-64 registers.
///
/// The explicit discriminant doubles as the register's numeric index
/// (see `AbstractReg::into_index` / `From<GPR> for u8`).
#[repr(u8)]
#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub enum GPR {
    RAX = 0,
    RCX = 1,
    RDX = 2,
    RBX = 3,
    RSP = 4,
    RBP = 5,
    RSI = 6,
    RDI = 7,
    R8 = 8,
    R9 = 9,
    R10 = 10,
    R11 = 11,
    R12 = 12,
    R13 = 13,
    R14 = 14,
    R15 = 15,
}
30
impl From<GPR> for u8 {
    /// Returns the register's numeric index (its enum discriminant, 0–15).
    fn from(val: GPR) -> Self {
        val as u8
    }
}
36
/// x86-64 XMM (SIMD / floating-point) registers.
///
/// The explicit discriminant doubles as the register's numeric index
/// (see `AbstractReg::into_index` / `From<XMM> for u8`).
#[repr(u8)]
#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
#[allow(dead_code)]
pub enum XMM {
    XMM0 = 0,
    XMM1 = 1,
    XMM2 = 2,
    XMM3 = 3,
    XMM4 = 4,
    XMM5 = 5,
    XMM6 = 6,
    XMM7 = 7,
    XMM8 = 8,
    XMM9 = 9,
    XMM10 = 10,
    XMM11 = 11,
    XMM12 = 12,
    XMM13 = 13,
    XMM14 = 14,
    XMM15 = 15,
}
59
impl From<XMM> for u8 {
    /// Returns the register's numeric index (its enum discriminant, 0–15).
    fn from(val: XMM) -> Self {
        val as u8
    }
}
65
66impl AbstractReg for GPR {
67 fn into_index(self) -> usize {
68 self as usize
69 }
70 fn from_index(n: usize) -> Result<GPR, ()> {
71 match n {
72 0..=15 => Ok(*GPR::iterator().nth(n).unwrap()),
73 _ => Err(()),
74 }
75 }
76 fn iterator() -> Iter<'static, GPR> {
77 static GPRS: [GPR; 16] = [
78 GPR::RAX,
79 GPR::RCX,
80 GPR::RDX,
81 GPR::RBX,
82 GPR::RSP,
83 GPR::RBP,
84 GPR::RSI,
85 GPR::RDI,
86 GPR::R8,
87 GPR::R9,
88 GPR::R10,
89 GPR::R11,
90 GPR::R12,
91 GPR::R13,
92 GPR::R14,
93 GPR::R15,
94 ];
95 GPRS.iter()
96 }
97 #[cfg(feature = "unwind")]
98 fn to_dwarf(self) -> gimli::Register {
99 use gimli::X86_64;
100
101 match self {
102 GPR::RAX => X86_64::RAX,
103 GPR::RCX => X86_64::RCX,
104 GPR::RDX => X86_64::RDX,
105 GPR::RBX => X86_64::RBX,
106 GPR::RSP => X86_64::RSP,
107 GPR::RBP => X86_64::RBP,
108 GPR::RSI => X86_64::RSI,
109 GPR::RDI => X86_64::RDI,
110 GPR::R8 => X86_64::R8,
111 GPR::R9 => X86_64::R9,
112 GPR::R10 => X86_64::R10,
113 GPR::R11 => X86_64::R11,
114 GPR::R12 => X86_64::R12,
115 GPR::R13 => X86_64::R13,
116 GPR::R14 => X86_64::R14,
117 GPR::R15 => X86_64::R15,
118 }
119 }
120}
121
122impl AbstractReg for XMM {
123 fn into_index(self) -> usize {
124 self as usize
125 }
126 fn from_index(n: usize) -> Result<XMM, ()> {
127 match n {
128 0..=15 => Ok(*XMM::iterator().nth(n).unwrap()),
129 _ => Err(()),
130 }
131 }
132 fn iterator() -> Iter<'static, XMM> {
133 static XMMS: [XMM; 16] = [
134 XMM::XMM0,
135 XMM::XMM1,
136 XMM::XMM2,
137 XMM::XMM3,
138 XMM::XMM4,
139 XMM::XMM5,
140 XMM::XMM6,
141 XMM::XMM7,
142 XMM::XMM8,
143 XMM::XMM9,
144 XMM::XMM10,
145 XMM::XMM11,
146 XMM::XMM12,
147 XMM::XMM13,
148 XMM::XMM14,
149 XMM::XMM15,
150 ];
151 XMMS.iter()
152 }
153 #[cfg(feature = "unwind")]
154 fn to_dwarf(self) -> gimli::Register {
155 use gimli::X86_64;
156
157 match self {
158 XMM::XMM0 => X86_64::XMM0,
159 XMM::XMM1 => X86_64::XMM1,
160 XMM::XMM2 => X86_64::XMM2,
161 XMM::XMM3 => X86_64::XMM3,
162 XMM::XMM4 => X86_64::XMM4,
163 XMM::XMM5 => X86_64::XMM5,
164 XMM::XMM6 => X86_64::XMM6,
165 XMM::XMM7 => X86_64::XMM7,
166 XMM::XMM8 => X86_64::XMM8,
167 XMM::XMM9 => X86_64::XMM9,
168 XMM::XMM10 => X86_64::XMM10,
169 XMM::XMM11 => X86_64::XMM11,
170 XMM::XMM12 => X86_64::XMM12,
171 XMM::XMM13 => X86_64::XMM13,
172 XMM::XMM14 => X86_64::XMM14,
173 XMM::XMM15 => X86_64::XMM15,
174 }
175 }
176}
177
/// A machine register: either a general-purpose register or an XMM
/// (SIMD / floating-point) register.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum X64Register {
    /// General-purpose register.
    GPR(GPR),
    /// XMM register.
    XMM(XMM),
}
186
187impl CombinedRegister for X64Register {
188 fn from_gpr(x: u16) -> Self {
190 X64Register::GPR(GPR::from_index(x as usize).unwrap())
191 }
192 fn from_simd(x: u16) -> Self {
194 X64Register::XMM(XMM::from_index(x as usize).unwrap())
195 }
196
197 }
231
/// Tracks how many argument registers of each class have already been
/// handed out while assigning call arguments to registers.
#[derive(Default)]
pub struct ArgumentRegisterAllocator {
    // Count of general-purpose argument registers allocated so far.
    n_gprs: usize,
    // Count of XMM argument registers allocated so far.
    n_xmms: usize,
}
238
239impl ArgumentRegisterAllocator {
240 pub fn next(
242 &mut self,
243 ty: Type,
244 calling_convention: CallingConvention,
245 ) -> Result<Option<X64Register>, CompileError> {
246 let ret = match calling_convention {
247 CallingConvention::WindowsFastcall => {
248 static GPR_SEQ: &[GPR] = &[GPR::RCX, GPR::RDX, GPR::R8, GPR::R9];
249 static XMM_SEQ: &[XMM] = &[XMM::XMM0, XMM::XMM1, XMM::XMM2, XMM::XMM3];
250 let idx = self.n_gprs + self.n_xmms;
251 match ty {
252 Type::I32 | Type::I64 => {
253 if idx < 4 {
254 let gpr = GPR_SEQ[idx];
255 self.n_gprs += 1;
256 Some(X64Register::GPR(gpr))
257 } else {
258 None
259 }
260 }
261 Type::F32 | Type::F64 => {
262 if idx < 4 {
263 let xmm = XMM_SEQ[idx];
264 self.n_xmms += 1;
265 Some(X64Register::XMM(xmm))
266 } else {
267 None
268 }
269 }
270 _ => {
271 return Err(CompileError::Codegen(format!(
272 "No register available for {calling_convention:?} and type {ty}"
273 )));
274 }
275 }
276 }
277 _ => {
278 static GPR_SEQ: &[GPR] =
279 &[GPR::RDI, GPR::RSI, GPR::RDX, GPR::RCX, GPR::R8, GPR::R9];
280 static XMM_SEQ: &[XMM] = &[
281 XMM::XMM0,
282 XMM::XMM1,
283 XMM::XMM2,
284 XMM::XMM3,
285 XMM::XMM4,
286 XMM::XMM5,
287 XMM::XMM6,
288 XMM::XMM7,
289 ];
290 match ty {
291 Type::I32 | Type::I64 => {
292 if self.n_gprs < GPR_SEQ.len() {
293 let gpr = GPR_SEQ[self.n_gprs];
294 self.n_gprs += 1;
295 Some(X64Register::GPR(gpr))
296 } else {
297 None
298 }
299 }
300 Type::F32 | Type::F64 => {
301 if self.n_xmms < XMM_SEQ.len() {
302 let xmm = XMM_SEQ[self.n_xmms];
303 self.n_xmms += 1;
304 Some(X64Register::XMM(xmm))
305 } else {
306 None
307 }
308 }
309 _ => {
310 return Err(CompileError::Codegen(format!(
311 "No register available for {calling_convention:?} and type {ty}"
312 )));
313 }
314 }
315 }
316 };
317
318 Ok(ret)
319 }
320}