// wasmer_compiler_cranelift/translator/code_translator/bounds_checks.rs
1//! Implementation of Wasm to CLIF memory access translation.
2//!
3//! Given
4//!
5//! * a dynamic Wasm memory index operand,
6//! * a static offset immediate, and
7//! * a static access size,
8//!
9//! bounds check the memory access and translate it into a native memory access.
10//!
11//! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
12//! !!! !!!
13//! !!! THIS CODE IS VERY SUBTLE, HAS MANY SPECIAL CASES, AND IS ALSO !!!
14//! !!! ABSOLUTELY CRITICAL FOR MAINTAINING THE SAFETY OF THE WASM HEAP !!!
15//! !!! SANDBOX. !!!
16//! !!! !!!
17//! !!! A good rule of thumb is to get two reviews on any substantive !!!
18//! !!! changes in here. !!!
19//! !!! !!!
20//! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
21
22use super::Reachability;
23use crate::{
24 func_environ::FuncEnvironment,
25 heap::{HeapData, HeapStyle},
26};
27use Reachability::*;
28use cranelift_codegen::{
29 cursor::{Cursor, FuncCursor},
30 ir::{self, InstBuilder, RelSourceLoc, condcodes::IntCC},
31 ir::{Expr, Fact},
32};
33use cranelift_frontend::FunctionBuilder;
34use wasmer_types::WasmResult;
35
/// Helper used to emit bounds checks (as necessary) and compute the native
/// address of a heap access.
///
/// Given a dynamic `index` operand, a static `offset` immediate, and a static
/// `access_size`, this emits whatever bounds-check code the heap's style
/// requires and translates the Wasm access into a native address.
///
/// Returns the `ir::Value` holding the native address of the heap access, or
/// `None` if the heap access will unconditionally trap.
pub fn bounds_check_and_compute_addr(
    builder: &mut FunctionBuilder,
    env: &mut FuncEnvironment<'_>,
    heap: &HeapData,
    // Dynamic operand indexing into the heap.
    index: ir::Value,
    // Static immediate added to the index.
    offset: u32,
    // Static size of the heap access.
    access_size: u8,
) -> WasmResult<Reachability<ir::Value>> {
    let pointer_bit_width = u16::try_from(env.pointer_type().bits()).unwrap();
    let orig_index = index;
    // Zero-extend the Wasm index to the native pointer type (a no-op when the
    // two types already match). PCC facts are attached when the heap has an
    // associated memory type.
    let index = cast_index_to_pointer_ty(
        index,
        heap.index_type,
        env.pointer_type(),
        heap.memory_type.is_some(),
        &mut builder.cursor(),
    );
    let offset_and_size = offset_plus_size(offset, access_size);
    let spectre_mitigations_enabled = env.heap_access_spectre_mitigation();
    let pcc = env.proof_carrying_code();

    // Guard-page-based optimizations below are only sound when the heap's
    // page size is at least as large as the host's page size.
    let host_page_size_log2 = env.target_config().page_size_align_log2;
    let can_use_virtual_memory = heap.page_size_log2 >= host_page_size_log2;

    // Emit the `icmp` implementing a bounds check and, when proof-carrying
    // code is enabled, attach facts relating the comparison's operands to the
    // original index value so the PCC verifier can validate the check.
    //
    // `lhs_off`/`rhs_off` are the static offsets (if known) of the LHS/RHS
    // relative to the symbolic values they were computed from; they are only
    // consulted when `pcc` is on, hence the `Option`s.
    let make_compare = |builder: &mut FunctionBuilder,
                        compare_kind: IntCC,
                        lhs: ir::Value,
                        lhs_off: Option<i64>,
                        rhs: ir::Value,
                        rhs_off: Option<i64>| {
        let result = builder.ins().icmp(compare_kind, lhs, rhs);
        if pcc {
            // Name the original value as a def of the SSA value;
            // if the value was extended, name that as well with a
            // dynamic range, overwriting the basic full-range
            // fact that we previously put on the uextend.
            builder.func.dfg.facts[orig_index] = Some(Fact::Def { value: orig_index });
            if index != orig_index {
                builder.func.dfg.facts[index] = Some(Fact::value(pointer_bit_width, orig_index));
            }

            // Create a fact on the LHS that is a "trivial symbolic
            // fact": v1 has range v1+LHS_off..=v1+LHS_off
            builder.func.dfg.facts[lhs] = Some(Fact::value_offset(
                pointer_bit_width,
                orig_index,
                lhs_off.unwrap(),
            ));
            // If the RHS is a symbolic value (v1 or gv1), we can
            // emit a Compare fact.
            if let Some(rhs) = builder.func.dfg.facts[rhs]
                .as_ref()
                .and_then(|f| f.as_symbol())
            {
                builder.func.dfg.facts[result] = Some(Fact::Compare {
                    kind: compare_kind,
                    lhs: Expr::offset(&Expr::value(orig_index), lhs_off.unwrap()).unwrap(),
                    rhs: Expr::offset(rhs, rhs_off.unwrap()).unwrap(),
                });
            }
            // Likewise, if the RHS is a constant, we can emit a
            // Compare fact.
            if let Some(k) = builder.func.dfg.facts[rhs]
                .as_ref()
                .and_then(|f| f.as_const(pointer_bit_width))
            {
                builder.func.dfg.facts[result] = Some(Fact::Compare {
                    kind: compare_kind,
                    lhs: Expr::offset(&Expr::value(orig_index), lhs_off.unwrap()).unwrap(),
                    rhs: Expr::constant((k as i64).checked_add(rhs_off.unwrap()).unwrap()),
                });
            }
        }
        result
    };

    // We need to emit code that will trap (or compute an address that will trap
    // when accessed) if
    //
    //     index + offset + access_size > bound
    //
    // or if the `index + offset + access_size` addition overflows.
    //
    // Note that we ultimately want a 64-bit integer (we only target 64-bit
    // architectures at the moment) and that `offset` is a `u32` and
    // `access_size` is a `u8`. This means that we can add the latter together
    // as `u64`s without fear of overflow, and we only have to be concerned with
    // whether adding in `index` will overflow.
    //
    // Finally, the following right-hand sides of the matches do have a little
    // bit of duplicated code across them, but I think writing it this way is
    // worth it for readability and seeing very clearly each of our cases for
    // different bounds checks and optimizations of those bounds checks. It is
    // intentionally written in a straightforward case-matching style that will
    // hopefully make it easy to port to ISLE one day.
    Ok(match heap.style {
        // ====== Dynamic Memories ======
        //
        // 1. First special case for when `offset + access_size == 1`:
        //
        //            index + 1 > bound
        //        ==> index >= bound
        HeapStyle::Dynamic { bound_gv } if offset_and_size == 1 => {
            let bound = get_dynamic_heap_bound(builder, env, heap);
            let oob = make_compare(
                builder,
                IntCC::UnsignedGreaterThanOrEqual,
                index,
                Some(0),
                bound,
                Some(0),
            );
            Reachable(explicit_check_oob_condition_and_compute_addr(
                &mut builder.cursor(),
                heap,
                env.pointer_type(),
                index,
                offset,
                access_size,
                spectre_mitigations_enabled,
                AddrPcc::dynamic(heap.memory_type, bound_gv),
                oob,
            ))
        }

        // 2. Second special case for when we know that there are enough guard
        //    pages to cover the offset and access size.
        //
        //    The precise should-we-trap condition is
        //
        //        index + offset + access_size > bound
        //
        //    However, if we instead check only the partial condition
        //
        //        index > bound
        //
        //    then the most out of bounds that the access can be, while that
        //    partial check still succeeds, is `offset + access_size`.
        //
        //    However, when we have a guard region that is at least as large as
        //    `offset + access_size`, we can rely on the virtual memory
        //    subsystem handling these out-of-bounds errors at
        //    runtime. Therefore, the partial `index > bound` check is
        //    sufficient for this heap configuration.
        //
        //    Additionally, this has the advantage that a series of Wasm loads
        //    that use the same dynamic index operand but different static
        //    offset immediates -- which is a common code pattern when accessing
        //    multiple fields in the same struct that is in linear memory --
        //    will all emit the same `index > bound` check, which we can GVN.
        HeapStyle::Dynamic { bound_gv }
            if can_use_virtual_memory && offset_and_size <= heap.offset_guard_size =>
        {
            let bound = get_dynamic_heap_bound(builder, env, heap);
            let oob = make_compare(
                builder,
                IntCC::UnsignedGreaterThan,
                index,
                Some(0),
                bound,
                Some(0),
            );
            Reachable(explicit_check_oob_condition_and_compute_addr(
                &mut builder.cursor(),
                heap,
                env.pointer_type(),
                index,
                offset,
                access_size,
                spectre_mitigations_enabled,
                AddrPcc::dynamic(heap.memory_type, bound_gv),
                oob,
            ))
        }

        // 3. Third special case for when `offset + access_size <= min_size`.
        //
        //    We know that `bound >= min_size`, so we can do the following
        //    comparison, without fear of the right-hand side wrapping around:
        //
        //            index + offset + access_size > bound
        //        ==> index > bound - (offset + access_size)
        HeapStyle::Dynamic { bound_gv } if offset_and_size <= heap.min_size => {
            let bound = get_dynamic_heap_bound(builder, env, heap);
            let adjustment = offset_and_size as i64;
            let adjustment_value = builder.ins().iconst(env.pointer_type(), adjustment);
            if pcc {
                builder.func.dfg.facts[adjustment_value] =
                    Some(Fact::constant(pointer_bit_width, offset_and_size));
            }
            let adjusted_bound = builder.ins().isub(bound, adjustment_value);
            if pcc {
                builder.func.dfg.facts[adjusted_bound] = Some(Fact::global_value_offset(
                    pointer_bit_width,
                    bound_gv,
                    -adjustment,
                ));
            }
            let oob = make_compare(
                builder,
                IntCC::UnsignedGreaterThan,
                index,
                Some(0),
                adjusted_bound,
                Some(adjustment),
            );
            Reachable(explicit_check_oob_condition_and_compute_addr(
                &mut builder.cursor(),
                heap,
                env.pointer_type(),
                index,
                offset,
                access_size,
                spectre_mitigations_enabled,
                AddrPcc::dynamic(heap.memory_type, bound_gv),
                oob,
            ))
        }

        // 4. General case for dynamic memories:
        //
        //        index + offset + access_size > bound
        //
        //    And we have to handle the overflow case in the left-hand side.
        HeapStyle::Dynamic { bound_gv } => {
            let access_size_val = builder
                .ins()
                // Explicit cast from u64 to i64: we just want the raw
                // bits, and iconst takes an `Imm64`.
                .iconst(env.pointer_type(), offset_and_size as i64);
            if pcc {
                builder.func.dfg.facts[access_size_val] =
                    Some(Fact::constant(pointer_bit_width, offset_and_size));
            }
            // Trap eagerly if `index + offset + access_size` overflows the
            // pointer width; otherwise proceed with the sum.
            let adjusted_index = builder.ins().uadd_overflow_trap(
                index,
                access_size_val,
                ir::TrapCode::HEAP_OUT_OF_BOUNDS,
            );
            if pcc {
                builder.func.dfg.facts[adjusted_index] = Some(Fact::value_offset(
                    pointer_bit_width,
                    index,
                    i64::try_from(offset_and_size).unwrap(),
                ));
            }
            let bound = get_dynamic_heap_bound(builder, env, heap);
            let oob = make_compare(
                builder,
                IntCC::UnsignedGreaterThan,
                adjusted_index,
                i64::try_from(offset_and_size).ok(),
                bound,
                Some(0),
            );
            Reachable(explicit_check_oob_condition_and_compute_addr(
                &mut builder.cursor(),
                heap,
                env.pointer_type(),
                index,
                offset,
                access_size,
                spectre_mitigations_enabled,
                AddrPcc::dynamic(heap.memory_type, bound_gv),
                oob,
            ))
        }

        // ====== Static Memories ======
        //
        // With static memories we know the size of the heap bound at compile
        // time.
        //
        // 1. First special case: trap immediately if `offset + access_size >
        //    bound`, since we will end up being out-of-bounds regardless of the
        //    given `index`.
        HeapStyle::Static { bound } if offset_and_size > bound => {
            assert!(
                can_use_virtual_memory,
                "static memories require the ability to use virtual memory"
            );
            builder.ins().trap(ir::TrapCode::HEAP_OUT_OF_BOUNDS);
            Unreachable
        }

        // 2. Second special case for when we can completely omit explicit
        //    bounds checks for 32-bit static memories.
        //
        //    First, let's rewrite our comparison to move all of the constants
        //    to one side:
        //
        //            index + offset + access_size > bound
        //        ==> index > bound - (offset + access_size)
        //
        //    We know the subtraction on the right-hand side won't wrap because
        //    we didn't hit the first special case.
        //
        //    Additionally, we add our guard pages (if any) to the right-hand
        //    side, since we can rely on the virtual memory subsystem at runtime
        //    to catch out-of-bound accesses within the range `bound .. bound +
        //    guard_size`. So now we are dealing with
        //
        //        index > bound + guard_size - (offset + access_size)
        //
        //    Note that `bound + guard_size` cannot overflow for
        //    correctly-configured heaps, as otherwise the heap wouldn't fit in
        //    a 64-bit memory space.
        //
        //    The complement of our should-this-trap comparison expression is
        //    the should-this-not-trap comparison expression:
        //
        //        index <= bound + guard_size - (offset + access_size)
        //
        //    If we know the right-hand side is greater than or equal to
        //    `u32::MAX`, then
        //
        //        index <= u32::MAX <= bound + guard_size - (offset + access_size)
        //
        //    This expression is always true when the heap is indexed with
        //    32-bit integers because `index` cannot be larger than
        //    `u32::MAX`. This means that `index` is always either in bounds or
        //    within the guard page region, neither of which require emitting an
        //    explicit bounds check.
        HeapStyle::Static { bound }
            if can_use_virtual_memory
                && heap.index_type == ir::types::I32
                && u64::from(u32::MAX) <= bound + heap.offset_guard_size - offset_and_size =>
        {
            assert!(
                can_use_virtual_memory,
                "static memories require the ability to use virtual memory"
            );
            Reachable(compute_addr(
                &mut builder.cursor(),
                heap,
                env.pointer_type(),
                index,
                offset,
                AddrPcc::static32(heap.memory_type, bound + heap.offset_guard_size),
            ))
        }

        // 3. General case for static memories.
        //
        //    We have to explicitly test whether
        //
        //        index > bound - (offset + access_size)
        //
        //    and trap if so.
        //
        //    Since we have to emit explicit bounds checks, we might as well be
        //    precise, not rely on the virtual memory subsystem at all, and not
        //    factor in the guard pages here.
        HeapStyle::Static { bound } => {
            assert!(
                can_use_virtual_memory,
                "static memories require the ability to use virtual memory"
            );
            // NB: this subtraction cannot wrap because we didn't hit the first
            // special case.
            let adjusted_bound = bound - offset_and_size;
            let adjusted_bound_value = builder
                .ins()
                .iconst(env.pointer_type(), adjusted_bound as i64);
            if pcc {
                builder.func.dfg.facts[adjusted_bound_value] =
                    Some(Fact::constant(pointer_bit_width, adjusted_bound));
            }
            let oob = make_compare(
                builder,
                IntCC::UnsignedGreaterThan,
                index,
                Some(0),
                adjusted_bound_value,
                Some(0),
            );
            Reachable(explicit_check_oob_condition_and_compute_addr(
                &mut builder.cursor(),
                heap,
                env.pointer_type(),
                index,
                offset,
                access_size,
                spectre_mitigations_enabled,
                AddrPcc::static32(heap.memory_type, bound),
                oob,
            ))
        }
    })
}
434
/// Get the bound of a dynamic heap as an `ir::Value`.
///
/// When the heap has a constant size (`min_size == max_size`) and PCC is
/// disabled, the bound is materialized as an `iconst`; otherwise it is loaded
/// from the heap's bound global value. In either case, when PCC is enabled a
/// fact tying the value to the bound GV is attached.
///
/// # Panics
///
/// Panics (`unreachable!`) if called on a static heap.
fn get_dynamic_heap_bound(
    builder: &mut FunctionBuilder,
    env: &mut FuncEnvironment<'_>,
    heap: &HeapData,
) -> ir::Value {
    // PCC facts are only emitted for heaps that have an associated memory type.
    let enable_pcc = heap.memory_type.is_some();

    let (value, gv) = match (heap.max_size, &heap.style) {
        // The heap has a constant size, no need to actually load the
        // bound.  TODO: this is currently disabled for PCC because we
        // can't easily prove that the GV load indeed results in a
        // constant (that information is lost in the CLIF). We'll want
        // to create an `iconst` GV expression kind to reify this fact
        // in the GV, then re-enable this opt. (Or, alternately,
        // compile such memories with a static-bound memtype and
        // facts.)
        (Some(max_size), HeapStyle::Dynamic { bound_gv })
            if heap.min_size == max_size && !enable_pcc =>
        {
            (
                builder.ins().iconst(env.pointer_type(), max_size as i64),
                *bound_gv,
            )
        }

        // Load the heap bound from its global variable.
        (_, HeapStyle::Dynamic { bound_gv }) => (
            builder.ins().global_value(env.pointer_type(), *bound_gv),
            *bound_gv,
        ),

        (_, HeapStyle::Static { .. }) => unreachable!("not a dynamic heap"),
    };

    // If proof-carrying code is enabled, apply a fact to the range to
    // tie it to the GV.
    if enable_pcc {
        builder.func.dfg.facts[value] = Some(Fact::global_value(
            u16::try_from(env.pointer_type().bits()).unwrap(),
            gv,
        ));
    }

    value
}
481
/// Convert `index` from the heap's index type (`index_ty`) to the native
/// pointer type (`pointer_ty`), zero-extending as necessary.
///
/// Returns `index` unchanged when the types already match. When `pcc` is
/// true, a max-range fact is attached to the extended value. A debug
/// value-label alias is also registered so debuginfo can name the extended
/// value under the original index's label.
fn cast_index_to_pointer_ty(
    index: ir::Value,
    index_ty: ir::Type,
    pointer_ty: ir::Type,
    pcc: bool,
    pos: &mut FuncCursor,
) -> ir::Value {
    // Fast path: no conversion needed.
    if index_ty == pointer_ty {
        return index;
    }
    // Note that using 64-bit heaps on a 32-bit host is not currently supported,
    // would require at least a bounds check here to ensure that the truncation
    // from 64-to-32 bits doesn't lose any upper bits. For now though we're
    // mostly interested in the 32-bit-heaps-on-64-bit-hosts cast.
    assert!(index_ty.bits() < pointer_ty.bits());

    // Convert `index` to `addr_ty`.
    let extended_index = pos.ins().uextend(pointer_ty, index);

    // Add a range fact on the extended value.
    if pcc {
        pos.func.dfg.facts[extended_index] = Some(Fact::max_range_for_width_extended(
            u16::try_from(index_ty.bits()).unwrap(),
            u16::try_from(pointer_ty.bits()).unwrap(),
        ));
    }

    // Add debug value-label alias so that debuginfo can name the extended
    // value as the address
    let loc = pos.srcloc();
    let loc = RelSourceLoc::from_base_offset(pos.func.params.base_srcloc(), loc);
    pos.func
        .stencil
        .dfg
        .add_value_label_alias(extended_index, loc, index);

    extended_index
}
520
/// Which facts do we want to emit for proof-carrying code, if any, on
/// address computations?
///
/// Constructed via [`AddrPcc::static32`] / [`AddrPcc::dynamic`], which yield
/// `None` (no facts) when the heap has no associated memory type.
#[derive(Clone, Copy, Debug)]
enum AddrPcc {
    /// A 32-bit static memory with the given size.
    Static32(ir::MemoryType, u64),
    /// Dynamic bounds-check, with actual memory size (the `GlobalValue`)
    /// expressed symbolically.
    Dynamic(ir::MemoryType, ir::GlobalValue),
}
531impl AddrPcc {
532 fn static32(memory_type: Option<ir::MemoryType>, size: u64) -> Option<Self> {
533 memory_type.map(|ty| Self::Static32(ty, size))
534 }
535 fn dynamic(memory_type: Option<ir::MemoryType>, bound: ir::GlobalValue) -> Option<Self> {
536 memory_type.map(|ty| Self::Dynamic(ty, bound))
537 }
538}
539
/// Emit explicit checks on the given out-of-bounds condition for the Wasm
/// address and return the native address.
///
/// This function deduplicates explicit bounds checks and Spectre mitigations
/// that inherently also implement bounds checking.
#[allow(clippy::too_many_arguments)]
fn explicit_check_oob_condition_and_compute_addr(
    pos: &mut FuncCursor,
    heap: &HeapData,
    addr_ty: ir::Type,
    index: ir::Value,
    offset: u32,
    access_size: u8,
    // Whether Spectre mitigations are enabled for heap accesses.
    spectre_mitigations_enabled: bool,
    // Whether we're emitting PCC facts.
    pcc: Option<AddrPcc>,
    // The `i8` boolean value that is non-zero when the heap access is out of
    // bounds (and therefore we should trap) and is zero when the heap access is
    // in bounds (and therefore we can proceed).
    oob_condition: ir::Value,
) -> ir::Value {
    // Without Spectre mitigations, a plain conditional trap on the OOB
    // condition suffices, emitted before the address computation.
    if !spectre_mitigations_enabled {
        pos.ins()
            .trapnz(oob_condition, ir::TrapCode::HEAP_OUT_OF_BOUNDS);
    }

    let mut addr = compute_addr(pos, heap, addr_ty, index, offset, pcc);

    // With Spectre mitigations, select a null address when out of bounds so
    // that even speculatively-executed accesses cannot reach a valid-looking
    // OOB address; `select_spectre_guard` is not subject to speculation.
    if spectre_mitigations_enabled {
        let null = pos.ins().iconst(addr_ty, 0);
        addr = pos.ins().select_spectre_guard(oob_condition, null, addr);

        // Attach PCC facts describing the selected address: it is either null
        // or an in-bounds pointer into the memory (`nullable: true`).
        match pcc {
            None => {}
            Some(AddrPcc::Static32(ty, size)) => {
                pos.func.dfg.facts[null] =
                    Some(Fact::constant(u16::try_from(addr_ty.bits()).unwrap(), 0));
                pos.func.dfg.facts[addr] = Some(Fact::Mem {
                    ty,
                    min_offset: 0,
                    max_offset: size.checked_sub(u64::from(access_size)).unwrap(),
                    nullable: true,
                });
            }
            Some(AddrPcc::Dynamic(ty, gv)) => {
                pos.func.dfg.facts[null] =
                    Some(Fact::constant(u16::try_from(addr_ty.bits()).unwrap(), 0));
                pos.func.dfg.facts[addr] = Some(Fact::DynamicMem {
                    ty,
                    min: Expr::constant(0),
                    max: Expr::offset(
                        &Expr::global_value(gv),
                        i64::try_from(heap.offset_guard_size)
                            .unwrap()
                            .checked_sub(i64::from(access_size))
                            .unwrap(),
                    )
                    .unwrap(),
                    nullable: true,
                });
            }
        }
    }

    addr
}
607
/// Emit code for the native address computation of a Wasm address,
/// without any bounds checks or overflow checks.
///
/// Computes `heap_base + index` and then, for non-zero `offset`, adds the
/// offset immediate, attaching PCC facts to each intermediate value when
/// `pcc` is `Some`.
///
/// It is the caller's responsibility to ensure that any necessary bounds and
/// overflow checks are emitted, and that the resulting address is never used
/// unless they succeed.
fn compute_addr(
    pos: &mut FuncCursor,
    heap: &HeapData,
    addr_ty: ir::Type,
    index: ir::Value,
    offset: u32,
    pcc: Option<AddrPcc>,
) -> ir::Value {
    debug_assert_eq!(pos.func.dfg.value_type(index), addr_ty);

    let heap_base = pos.ins().global_value(addr_ty, heap.base);

    // Fact on the base pointer: a (non-null) pointer to the memory itself.
    match pcc {
        None => {}
        Some(AddrPcc::Static32(ty, _size)) => {
            pos.func.dfg.facts[heap_base] = Some(Fact::Mem {
                ty,
                min_offset: 0,
                max_offset: 0,
                nullable: false,
            });
        }
        Some(AddrPcc::Dynamic(ty, _limit)) => {
            pos.func.dfg.facts[heap_base] = Some(Fact::dynamic_base_ptr(ty));
        }
    }

    let base_and_index = pos.ins().iadd(heap_base, index);

    // Fact on `base + index`: symbolic when the index carries a symbolic
    // fact, otherwise a concrete `[0, u32::MAX]` offset range into the memory.
    match pcc {
        None => {}
        Some(AddrPcc::Static32(ty, _) | AddrPcc::Dynamic(ty, _)) => {
            if let Some(idx) = pos.func.dfg.facts[index]
                .as_ref()
                .and_then(|f| f.as_symbol())
                .cloned()
            {
                pos.func.dfg.facts[base_and_index] = Some(Fact::DynamicMem {
                    ty,
                    min: idx.clone(),
                    max: idx,
                    nullable: false,
                });
            } else {
                pos.func.dfg.facts[base_and_index] = Some(Fact::Mem {
                    ty,
                    min_offset: 0,
                    max_offset: u64::from(u32::MAX),
                    nullable: false,
                });
            }
        }
    }

    if offset == 0 {
        base_and_index
    } else {
        // NB: The addition of the offset immediate must happen *before* the
        // `select_spectre_guard`, if any. If it happens after, then we
        // potentially are letting speculative execution read the whole first
        // 4GiB of memory.
        let offset_val = pos.ins().iconst(addr_ty, i64::from(offset));

        if pcc.is_some() {
            pos.func.dfg.facts[offset_val] = Some(Fact::constant(
                u16::try_from(addr_ty.bits()).unwrap(),
                u64::from(offset),
            ));
        }

        let result = pos.ins().iadd(base_and_index, offset_val);

        // Fact on the final address: same shape as above, shifted by `offset`.
        match pcc {
            None => {}
            Some(AddrPcc::Static32(ty, _) | AddrPcc::Dynamic(ty, _)) => {
                if let Some(idx) = pos.func.dfg.facts[index]
                    .as_ref()
                    .and_then(|f| f.as_symbol())
                {
                    pos.func.dfg.facts[result] = Some(Fact::DynamicMem {
                        ty,
                        min: idx.clone(),
                        // Safety: adding an offset to an expression with
                        // zero offset -- add cannot wrap, so `unwrap()`
                        // cannot fail.
                        max: Expr::offset(idx, i64::from(offset)).unwrap(),
                        nullable: false,
                    });
                } else {
                    pos.func.dfg.facts[result] = Some(Fact::Mem {
                        ty,
                        min_offset: u64::from(offset),
                        // Safety: can't overflow -- two u32s summed in a
                        // 64-bit add. TODO: when memory64 is supported here,
                        // `u32::MAX` is no longer true, and we'll need to
                        // handle overflow here.
                        max_offset: u64::from(u32::MAX) + u64::from(offset),
                        nullable: false,
                    });
                }
            }
        }
        result
    }
}
719
/// Sum the static offset immediate and access size as a `u64`.
///
/// Both operands are widened to `u64` before the addition, so the result (at
/// most `u32::MAX + u8::MAX`) can never overflow.
#[inline]
fn offset_plus_size(offset: u32, size: u8) -> u64 {
    u64::from(offset) + u64::from(size)
}