wasmer_compiler_llvm/
config.rs

1use crate::compiler::LLVMCompiler;
2use enum_iterator::Sequence;
3pub use inkwell::OptimizationLevel as LLVMOptLevel;
4use inkwell::targets::{
5    CodeModel, InitializationConfig, RelocMode, Target as InkwellTarget, TargetMachine,
6    TargetMachineOptions, TargetTriple,
7};
8use itertools::Itertools;
9use std::fs::File;
10use std::io::{self, Write};
11use std::path::PathBuf;
12use std::sync::Arc;
13use std::{fmt::Debug, num::NonZero};
14use target_lexicon::BinaryFormat;
15use wasmer_compiler::misc::{CompiledKind, function_kind_to_filename};
16use wasmer_compiler::{Compiler, CompilerConfig, Engine, EngineBuilder, ModuleMiddleware};
17use wasmer_types::{
18    Features,
19    target::{Architecture, OperatingSystem, Target, Triple},
20};
21
/// Alias for inkwell's LLVM `Module` type.
pub type InkwellModule<'ctx> = inkwell::module::Module<'ctx>;

/// Alias for inkwell's `MemoryBuffer` type (holds emitted object/assembly bytes).
pub type InkwellMemoryBuffer = inkwell::memory_buffer::MemoryBuffer;
27
/// Callbacks to the different LLVM compilation phases.
///
/// When configured, intermediate compilation artifacts (pre/post-optimization
/// IR, object files, assembly listings) are dumped under `debug_dir` so they
/// can be inspected offline.
#[derive(Debug, Clone)]
pub struct LLVMCallbacks {
    // Root directory for dumped artifacts; created (if needed) by `new`.
    debug_dir: PathBuf,
}
33
34impl LLVMCallbacks {
35    pub fn new(debug_dir: PathBuf) -> Result<Self, io::Error> {
36        // Create the debug dir in case it doesn't exist
37        std::fs::create_dir_all(&debug_dir)?;
38        Ok(Self { debug_dir })
39    }
40
41    fn base_path(&self, module_hash: &Option<String>) -> PathBuf {
42        let mut path = self.debug_dir.clone();
43        if let Some(hash) = module_hash {
44            path.push(hash);
45        }
46        std::fs::create_dir_all(&path)
47            .unwrap_or_else(|_| panic!("cannot create debug directory: {}", path.display()));
48        path
49    }
50
51    pub fn preopt_ir(
52        &self,
53        kind: &CompiledKind,
54        module_hash: &Option<String>,
55        module: &InkwellModule,
56    ) {
57        let mut path = self.base_path(module_hash);
58        path.push(function_kind_to_filename(kind, ".preopt.ll"));
59        module
60            .print_to_file(&path)
61            .expect("Error while dumping pre optimized LLVM IR");
62    }
63    pub fn postopt_ir(
64        &self,
65        kind: &CompiledKind,
66        module_hash: &Option<String>,
67        module: &InkwellModule,
68    ) {
69        let mut path = self.base_path(module_hash);
70        path.push(function_kind_to_filename(kind, ".postopt.ll"));
71        module
72            .print_to_file(&path)
73            .expect("Error while dumping post optimized LLVM IR");
74    }
75    pub fn obj_memory_buffer(
76        &self,
77        kind: &CompiledKind,
78        module_hash: &Option<String>,
79        memory_buffer: &InkwellMemoryBuffer,
80    ) {
81        let mut path = self.base_path(module_hash);
82        path.push(function_kind_to_filename(kind, ".o"));
83        let mem_buf_slice = memory_buffer.as_slice();
84        let mut file =
85            File::create(path).expect("Error while creating debug object file from LLVM IR");
86        file.write_all(mem_buf_slice).unwrap();
87    }
88
89    pub fn asm_memory_buffer(
90        &self,
91        kind: &CompiledKind,
92        module_hash: &Option<String>,
93        asm_memory_buffer: &InkwellMemoryBuffer,
94    ) {
95        let mut path = self.base_path(module_hash);
96        path.push(function_kind_to_filename(kind, ".s"));
97        let mem_buf_slice = asm_memory_buffer.as_slice();
98        let mut file =
99            File::create(path).expect("Error while creating debug assembly file from LLVM IR");
100        file.write_all(mem_buf_slice).unwrap();
101    }
102}
103
/// Configuration for the LLVM compiler backend.
#[derive(Debug, Clone)]
pub struct LLVM {
    /// Canonicalize NaN outputs of float operations (set via
    /// `CompilerConfig::canonicalize_nans`).
    pub(crate) enable_nan_canonicalization: bool,
    /// Use non-volatile memory operations: faster, but not 100% SPEC compliant.
    pub(crate) enable_non_volatile_memops: bool,
    /// Whether to verify compiler IR.
    pub(crate) enable_verifier: bool,
    /// Emit a perfmap (set via `CompilerConfig::enable_perfmap`).
    pub(crate) enable_perfmap: bool,
    /// LLVM optimization level used for speed-oriented compilation.
    pub(crate) opt_level: LLVMOptLevel,
    // Emit position-independent code (suitable for dlopen). Also used as a
    // proxy for "non-native build" in triple/reloc/code-model selection.
    is_pic: bool,
    /// Optional callbacks that dump intermediate compilation artifacts.
    pub(crate) callbacks: Option<LLVMCallbacks>,
    /// The middleware chain.
    pub(crate) middlewares: Vec<Arc<dyn ModuleMiddleware>>,
    /// Number of threads to use when compiling a module.
    pub(crate) num_threads: NonZero<usize>,
    /// Ask the target machine to emit verbose assembly.
    pub(crate) verbose_asm: bool,
}
119
/// How aggressively a module (or part of it) should be optimized.
#[derive(Clone, Copy, Eq, PartialEq, Hash, Debug, Sequence)]
pub(crate) enum OptimizationStyle {
    /// Use the configured `opt_level` (defaults to `Aggressive`).
    ForSpeed,
    /// Favor smaller code (maps to `LLVMOptLevel::Less`).
    ForSize,
    /// No optimization (maps to `LLVMOptLevel::None`).
    Disabled,
}
126
127impl LLVM {
128    /// Creates a new configuration object with the default configuration
129    /// specified.
130    pub fn new() -> Self {
131        Self {
132            enable_nan_canonicalization: false,
133            enable_non_volatile_memops: false,
134            enable_verifier: false,
135            enable_perfmap: false,
136            opt_level: LLVMOptLevel::Aggressive,
137            is_pic: false,
138            callbacks: None,
139            middlewares: vec![],
140            verbose_asm: false,
141            num_threads: std::thread::available_parallelism().unwrap_or(NonZero::new(1).unwrap()),
142        }
143    }
144
145    /// The optimization levels when optimizing the IR.
146    pub fn opt_level(&mut self, opt_level: LLVMOptLevel) -> &mut Self {
147        self.opt_level = opt_level;
148        self
149    }
150
151    pub fn num_threads(&mut self, num_threads: NonZero<usize>) -> &mut Self {
152        self.num_threads = num_threads;
153        self
154    }
155
156    pub fn verbose_asm(&mut self, verbose_asm: bool) -> &mut Self {
157        self.verbose_asm = verbose_asm;
158        self
159    }
160
161    /// Callbacks that will triggered in the different compilation
162    /// phases in LLVM.
163    pub fn callbacks(&mut self, callbacks: Option<LLVMCallbacks>) -> &mut Self {
164        self.callbacks = callbacks;
165        self
166    }
167
168    /// For the LLVM compiler, we can use non-volatile memory operations which lead to a better performance
169    /// (but are not 100% SPEC compliant).
170    pub fn non_volatile_memops(&mut self, enable_non_volatile_memops: bool) -> &mut Self {
171        self.enable_non_volatile_memops = enable_non_volatile_memops;
172        self
173    }
174
175    fn reloc_mode(&self, binary_format: BinaryFormat) -> RelocMode {
176        if matches!(binary_format, BinaryFormat::Macho) {
177            return RelocMode::Static;
178        }
179
180        if self.is_pic {
181            RelocMode::PIC
182        } else {
183            RelocMode::Static
184        }
185    }
186
187    fn code_model(&self, binary_format: BinaryFormat) -> CodeModel {
188        // We normally use the large code model, but when targeting shared
189        // objects, we are required to use PIC. If we use PIC anyways, we lose
190        // any benefit from large code model and there's some cost on all
191        // platforms, plus some platforms (MachO) don't support PIC + large
192        // at all.
193        if matches!(binary_format, BinaryFormat::Macho) {
194            return CodeModel::Default;
195        }
196
197        if self.is_pic {
198            CodeModel::Small
199        } else {
200            CodeModel::Large
201        }
202    }
203
204    pub(crate) fn target_operating_system(&self, target: &Target) -> OperatingSystem {
205        match target.triple().operating_system {
206            OperatingSystem::Darwin(deployment) if !self.is_pic => {
207                // LLVM detects static relocation + darwin + 64-bit and
208                // force-enables PIC because MachO doesn't support that
209                // combination. They don't check whether they're targeting
210                // MachO, they check whether the OS is set to Darwin.
211                //
212                // Since both linux and darwin use SysV ABI, this should work.
213                //  but not in the case of Aarch64, there the ABI is slightly different
214                #[allow(clippy::match_single_binding)]
215                match target.triple().architecture {
216                    Architecture::Aarch64(_) => OperatingSystem::Darwin(deployment),
217                    _ => OperatingSystem::Linux,
218                }
219            }
220            other => other,
221        }
222    }
223
224    pub(crate) fn target_binary_format(&self, target: &Target) -> target_lexicon::BinaryFormat {
225        if self.is_pic {
226            target.triple().binary_format
227        } else {
228            match self.target_operating_system(target) {
229                OperatingSystem::Darwin(_) => target_lexicon::BinaryFormat::Macho,
230                _ => target_lexicon::BinaryFormat::Elf,
231            }
232        }
233    }
234
235    fn target_triple(&self, target: &Target) -> TargetTriple {
236        let architecture = if target.triple().architecture
237            == Architecture::Riscv64(target_lexicon::Riscv64Architecture::Riscv64gc)
238        {
239            target_lexicon::Architecture::Riscv64(target_lexicon::Riscv64Architecture::Riscv64)
240        } else {
241            target.triple().architecture
242        };
243        // Hack: we're using is_pic to determine whether this is a native
244        // build or not.
245
246        let operating_system = self.target_operating_system(target);
247        let binary_format = self.target_binary_format(target);
248
249        let triple = Triple {
250            architecture,
251            vendor: target.triple().vendor.clone(),
252            operating_system,
253            environment: target.triple().environment,
254            binary_format,
255        };
256        TargetTriple::create(&triple.to_string())
257    }
258
259    /// Generates the target machine for the current target
260    pub fn target_machine(&self, target: &Target) -> TargetMachine {
261        self.target_machine_with_opt(target, OptimizationStyle::ForSpeed)
262    }
263
264    pub(crate) fn target_machine_with_opt(
265        &self,
266        target: &Target,
267        opt_style: OptimizationStyle,
268    ) -> TargetMachine {
269        let triple = target.triple();
270        let cpu_features = &target.cpu_features();
271
272        match triple.architecture {
273            Architecture::X86_64 | Architecture::X86_32(_) => {
274                InkwellTarget::initialize_x86(&InitializationConfig {
275                    asm_parser: true,
276                    asm_printer: true,
277                    base: true,
278                    disassembler: true,
279                    info: true,
280                    machine_code: true,
281                })
282            }
283            Architecture::Aarch64(_) => InkwellTarget::initialize_aarch64(&InitializationConfig {
284                asm_parser: true,
285                asm_printer: true,
286                base: true,
287                disassembler: true,
288                info: true,
289                machine_code: true,
290            }),
291            Architecture::Riscv64(_) | Architecture::Riscv32(_) => {
292                InkwellTarget::initialize_riscv(&InitializationConfig {
293                    asm_parser: true,
294                    asm_printer: true,
295                    base: true,
296                    disassembler: true,
297                    info: true,
298                    machine_code: true,
299                })
300            }
301            Architecture::LoongArch64 => {
302                InkwellTarget::initialize_loongarch(&InitializationConfig {
303                    asm_parser: true,
304                    asm_printer: true,
305                    base: true,
306                    disassembler: true,
307                    info: true,
308                    machine_code: true,
309                })
310            }
311            _ => unimplemented!("target {} not yet supported in Wasmer", triple),
312        }
313
314        // The CPU features formatted as LLVM strings
315        // We can safely map to gcc-like features as the CPUFeatures
316        // are compliant with the same string representations as gcc.
317        let llvm_cpu_features = cpu_features
318            .iter()
319            .map(|feature| format!("+{feature}"))
320            .join(",");
321
322        let target_triple = self.target_triple(target);
323        let llvm_target = InkwellTarget::from_triple(&target_triple).unwrap();
324        let mut llvm_target_machine_options = TargetMachineOptions::new()
325            .set_cpu(match triple.architecture {
326                Architecture::Riscv64(_) => "generic-rv64",
327                Architecture::Riscv32(_) => "generic-rv32",
328                Architecture::LoongArch64 => "generic-la64",
329                _ => "generic",
330            })
331            .set_features(match triple.architecture {
332                Architecture::Riscv64(_) => "+m,+a,+c,+d,+f",
333                Architecture::Riscv32(_) => "+m,+a,+c,+d,+f",
334                Architecture::LoongArch64 => "+f,+d",
335                _ => &llvm_cpu_features,
336            })
337            .set_level(match opt_style {
338                OptimizationStyle::ForSpeed => self.opt_level,
339                OptimizationStyle::ForSize => LLVMOptLevel::Less,
340                OptimizationStyle::Disabled => LLVMOptLevel::None,
341            })
342            .set_reloc_mode(self.reloc_mode(self.target_binary_format(target)))
343            .set_code_model(match triple.architecture {
344                Architecture::LoongArch64 | Architecture::Riscv64(_) | Architecture::Riscv32(_) => {
345                    CodeModel::Medium
346                }
347                _ => self.code_model(self.target_binary_format(target)),
348            });
349        if let Architecture::Riscv64(_) = triple.architecture {
350            llvm_target_machine_options = llvm_target_machine_options.set_abi("lp64d");
351        }
352        let target_machine = llvm_target
353            .create_target_machine_from_options(&target_triple, llvm_target_machine_options)
354            .unwrap();
355        target_machine.set_asm_verbosity(self.verbose_asm);
356        target_machine
357    }
358}
359
360impl CompilerConfig for LLVM {
361    /// Emit code suitable for dlopen.
362    fn enable_pic(&mut self) {
363        // TODO: although we can emit PIC, the object file parser does not yet
364        // support all the relocations.
365        self.is_pic = true;
366    }
367
368    fn enable_perfmap(&mut self) {
369        self.enable_perfmap = true
370    }
371
372    /// Whether to verify compiler IR.
373    fn enable_verifier(&mut self) {
374        self.enable_verifier = true;
375    }
376
377    /// For the LLVM compiler, we can use non-volatile memory operations which lead to a better performance
378    /// (but are not 100% SPEC compliant).
379    fn enable_non_volatile_memops(&mut self) {
380        self.enable_non_volatile_memops = true;
381    }
382
383    fn canonicalize_nans(&mut self, enable: bool) {
384        self.enable_nan_canonicalization = enable;
385    }
386
387    /// Transform it into the compiler.
388    fn compiler(self: Box<Self>) -> Box<dyn Compiler> {
389        Box::new(LLVMCompiler::new(*self))
390    }
391
392    /// Pushes a middleware onto the back of the middleware chain.
393    fn push_middleware(&mut self, middleware: Arc<dyn ModuleMiddleware>) {
394        self.middlewares.push(middleware);
395    }
396
397    fn supported_features_for_target(&self, _target: &Target) -> wasmer_types::Features {
398        let mut feats = Features::default();
399        feats.exceptions(true);
400        feats.relaxed_simd(true);
401        feats.wide_arithmetic(true);
402        feats.tail_call(true);
403        feats
404    }
405}
406
407impl Default for LLVM {
408    fn default() -> LLVM {
409        Self::new()
410    }
411}
412
413impl From<LLVM> for Engine {
414    fn from(config: LLVM) -> Self {
415        EngineBuilder::new(config).engine()
416    }
417}