// wasmer_compiler_cranelift/heap.rs
//! Heaps to implement WebAssembly linear memories.

use cranelift_codegen::ir::{GlobalValue, MemoryType, Type};
use wasmer_types::entity::entity_impl;

/// An opaque reference to a [`HeapData`][crate::HeapData].
///
/// While the order is stable, it is arbitrary.
#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
#[cfg_attr(
    feature = "enable-serde",
    derive(serde_derive::Serialize, serde_derive::Deserialize)
)]
pub struct Heap(u32);
// `entity_impl!` supplies the entity-reference plumbing (index conversion)
// and, per the cranelift-entity convention, a "heap"-prefixed `Display`
// (e.g. `heap0`, `heap1`) — confirm against the macro's definition.
entity_impl!(Heap, "heap");
16
/// A heap implementing a WebAssembly linear memory.
///
/// Code compiled from WebAssembly runs in a sandbox where it can't access all
/// process memory. Instead, it is given a small set of memory areas to work in,
/// and all accesses are bounds checked. `cranelift-wasm` models this through
/// the concept of *heaps*.
///
/// Heap addresses can be smaller than the native pointer size, for example
/// unsigned `i32` offsets on a 64-bit architecture.
///
/// A heap appears as three consecutive ranges of address space:
///
/// 1. The *mapped pages* are the accessible memory range in the heap. A heap
///    may have a minimum guaranteed size which means that some mapped pages are
///    always present.
///
/// 2. The *unmapped pages* is a possibly empty range of address space that may
///    be mapped in the future when the heap is grown. They are addressable but
///    not accessible.
///
/// 3. The *offset-guard pages* is a range of address space that is guaranteed
///    to always cause a trap when accessed. It is used to optimize bounds
///    checking for heap accesses with a shared base pointer. They are
///    addressable but not accessible.
///
/// The *heap bound* is the total size of the mapped and unmapped pages. This is
/// the bound that `heap_addr` checks against. Memory accesses inside the heap
/// bounds can trap if they hit an unmapped page (which is not accessible).
///
/// Two styles of heaps are supported, *static* and *dynamic*. They behave
/// differently when resized.
///
/// #### Static heaps
///
/// A *static heap* starts out with all the address space it will ever need, so
/// it never moves to a different address. At the base address is a number of
/// mapped pages corresponding to the heap's current size. Then follows a number
/// of unmapped pages where the heap can grow up to its maximum size. After the
/// unmapped pages follow the offset-guard pages which are also guaranteed to
/// generate a trap when accessed.
///
/// #### Dynamic heaps
///
/// A *dynamic heap* can be relocated to a different base address when it is
/// resized, and its bound can move dynamically. The offset-guard pages move
/// when the heap is resized. The bound of a dynamic heap is stored in a global
/// value.
#[derive(Clone, PartialEq, Hash)]
#[cfg_attr(
    feature = "enable-serde",
    derive(serde_derive::Serialize, serde_derive::Deserialize)
)]
pub struct HeapData {
    /// The address of the start of the heap's storage.
    pub base: GlobalValue,

    /// Guaranteed minimum heap size in bytes. Heap accesses before `min_size`
    /// don't need bounds checking.
    pub min_size: u64,

    /// The maximum heap size in bytes.
    ///
    /// Heap accesses larger than this will always trap. `None` means the heap
    /// has no declared maximum.
    pub max_size: Option<u64>,

    /// The memory type for the pointed-to memory, if using proof-carrying code.
    pub memory_type: Option<MemoryType>,

    /// Size in bytes of the offset-guard pages following the heap.
    pub offset_guard_size: u64,

    /// Heap style, with additional style-specific info.
    pub style: HeapStyle,

    /// The index type for the heap (the IR type of addresses/offsets into it,
    /// e.g. `i32` for a 32-bit memory).
    pub index_type: Type,

    /// The log2 of this memory's page size.
    // NOTE(review): presumably 16 for standard wasm 64 KiB pages — confirm
    // against the module's memory declaration.
    pub page_size_log2: u8,
}
97
98/// Style of heap including style-specific information.
99#[derive(Clone, PartialEq, Hash)]
100#[cfg_attr(
101 feature = "enable-serde",
102 derive(serde_derive::Serialize, serde_derive::Deserialize)
103)]
104pub enum HeapStyle {
105 /// A dynamic heap can be relocated to a different base address when it is
106 /// grown.
107 Dynamic {
108 /// Global value providing the current bound of the heap in bytes.
109 bound_gv: GlobalValue,
110 },
111
112 /// A static heap has a fixed base address and a number of not-yet-allocated
113 /// pages before the offset-guard pages.
114 Static {
115 /// Heap bound in bytes. The offset-guard pages are allocated after the
116 /// bound.
117 bound: u64,
118 },
119}