wasmer_compiler_cranelift/translator/code_translator/bounds_checks.rs
//! Implementation of Wasm to CLIF memory access translation.
//!
//! Given
//!
//! * a dynamic Wasm memory index operand,
//! * a static offset immediate, and
//! * a static access size,
//!
//! bounds check the memory access and translate it into a native memory access.
//!
//! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
//! !!!                                                                      !!!
//! !!!    THIS CODE IS VERY SUBTLE, HAS MANY SPECIAL CASES, AND IS ALSO     !!!
//! !!!   ABSOLUTELY CRITICAL FOR MAINTAINING THE SAFETY OF THE WASM HEAP    !!!
//! !!!                               SANDBOX.                               !!!
//! !!!                                                                      !!!
//! !!!    A good rule of thumb is to get two reviews on any substantive     !!!
//! !!!                         changes in here.                             !!!
//! !!!                                                                      !!!
//! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

use super::Reachability;
use crate::{
    heap::{HeapData, HeapStyle},
    translator::func_environ::FuncEnvironment,
};
use Reachability::*;
use cranelift_codegen::{
    cursor::{Cursor, FuncCursor},
    ir::{self, InstBuilder, RelSourceLoc, condcodes::IntCC},
    ir::{Expr, Fact},
};
use cranelift_frontend::FunctionBuilder;
use wasmer_types::WasmResult;

/// Helper used to emit bounds checks (as necessary) and compute the native
/// address of a heap access.
///
/// Returns the `ir::Value` holding the native address of the heap access, or
/// `Unreachable` if the heap access will unconditionally trap.
pub fn bounds_check_and_compute_addr<Env>(
    builder: &mut FunctionBuilder,
    env: &mut Env,
    heap: &HeapData,
    // Dynamic operand indexing into the heap.
    index: ir::Value,
    // Static immediate added to the index.
    offset: u32,
    // Static size of the heap access.
    access_size: u8,
) -> WasmResult<Reachability<ir::Value>>
where
    Env: FuncEnvironment + ?Sized,
{
    let pointer_bit_width = u16::try_from(env.pointer_type().bits()).unwrap();
    let orig_index = index;
    let index = cast_index_to_pointer_ty(
        index,
        heap.index_type,
        env.pointer_type(),
        heap.memory_type.is_some(),
        &mut builder.cursor(),
    );
    let offset_and_size = offset_plus_size(offset, access_size);
    let spectre_mitigations_enabled = env.heap_access_spectre_mitigation();
    let pcc = env.proof_carrying_code();

    let host_page_size_log2 = env.target_config().page_size_align_log2;
    let can_use_virtual_memory = heap.page_size_log2 >= host_page_size_log2;

    let make_compare = |builder: &mut FunctionBuilder,
                        compare_kind: IntCC,
                        lhs: ir::Value,
                        lhs_off: Option<i64>,
                        rhs: ir::Value,
                        rhs_off: Option<i64>| {
        let result = builder.ins().icmp(compare_kind, lhs, rhs);
        if pcc {
            // Name the original value as a def of the SSA value;
            // if the value was extended, name that as well with a
            // dynamic range, overwriting the basic full-range
            // fact that we previously put on the uextend.
            builder.func.dfg.facts[orig_index] = Some(Fact::Def { value: orig_index });
            if index != orig_index {
                builder.func.dfg.facts[index] = Some(Fact::value(pointer_bit_width, orig_index));
            }

            // Create a fact on the LHS that is a "trivial symbolic
            // fact": v1 has range v1+LHS_off..=v1+LHS_off
            builder.func.dfg.facts[lhs] = Some(Fact::value_offset(
                pointer_bit_width,
                orig_index,
                lhs_off.unwrap(),
            ));
            // If the RHS is a symbolic value (v1 or gv1), we can
            // emit a Compare fact.
            if let Some(rhs) = builder.func.dfg.facts[rhs]
                .as_ref()
                .and_then(|f| f.as_symbol())
            {
                builder.func.dfg.facts[result] = Some(Fact::Compare {
                    kind: compare_kind,
                    lhs: Expr::offset(&Expr::value(orig_index), lhs_off.unwrap()).unwrap(),
                    rhs: Expr::offset(rhs, rhs_off.unwrap()).unwrap(),
                });
            }
            // Likewise, if the RHS is a constant, we can emit a
            // Compare fact.
            if let Some(k) = builder.func.dfg.facts[rhs]
                .as_ref()
                .and_then(|f| f.as_const(pointer_bit_width))
            {
                builder.func.dfg.facts[result] = Some(Fact::Compare {
                    kind: compare_kind,
                    lhs: Expr::offset(&Expr::value(orig_index), lhs_off.unwrap()).unwrap(),
                    rhs: Expr::constant((k as i64).checked_add(rhs_off.unwrap()).unwrap()),
                });
            }
        }
        result
    };

    // We need to emit code that will trap (or compute an address that will trap
    // when accessed) if
    //
    //     index + offset + access_size > bound
    //
    // or if the `index + offset + access_size` addition overflows.
    //
    // Note that we ultimately want a 64-bit integer (we only target 64-bit
    // architectures at the moment) and that `offset` is a `u32` and
    // `access_size` is a `u8`. This means that we can add the latter together
    // as `u64`s without fear of overflow, and we only have to be concerned with
    // whether adding in `index` will overflow.
    //
    // Finally, the following right-hand sides of the matches do have a little
    // bit of duplicated code across them, but I think writing it this way is
    // worth it for readability and seeing very clearly each of our cases for
    // different bounds checks and optimizations of those bounds checks. It is
    // intentionally written in a straightforward case-matching style that will
    // hopefully make it easy to port to ISLE one day.
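    //
    // For example, with a 64-bit index of `u64::MAX` and `offset_and_size ==
    // 8`, the sum `index + offset + access_size` wraps around to `7`, which
    // would incorrectly pass a naive `> bound` comparison; this is why the
    // general dynamic case below adds the two with `uadd_overflow_trap`
    // rather than a plain `iadd`.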
    Ok(match heap.style {
        // ====== Dynamic Memories ======
        //
        // 1. First special case for when `offset + access_size == 1`:
        //
        //        index + 1 > bound
        //    ==> index >= bound
        HeapStyle::Dynamic { bound_gv } if offset_and_size == 1 => {
            let bound = get_dynamic_heap_bound(builder, env, heap);
            let oob = make_compare(
                builder,
                IntCC::UnsignedGreaterThanOrEqual,
                index,
                Some(0),
                bound,
                Some(0),
            );
            Reachable(explicit_check_oob_condition_and_compute_addr(
                &mut builder.cursor(),
                heap,
                env.pointer_type(),
                index,
                offset,
                access_size,
                spectre_mitigations_enabled,
                AddrPcc::dynamic(heap.memory_type, bound_gv),
                oob,
            ))
        }

        // 2. Second special case for when we know that there are enough guard
        //    pages to cover the offset and access size.
        //
        //    The precise should-we-trap condition is
        //
        //        index + offset + access_size > bound
        //
        //    However, if we instead check only the partial condition
        //
        //        index > bound
        //
        //    then the most out of bounds that the access can be, while that
        //    partial check still succeeds, is `offset + access_size`.
        //
        //    However, when we have a guard region that is at least as large as
        //    `offset + access_size`, we can rely on the virtual memory
        //    subsystem handling these out-of-bounds errors at
        //    runtime. Therefore, the partial `index > bound` check is
        //    sufficient for this heap configuration.
        //
        //    Additionally, this has the advantage that a series of Wasm loads
        //    that use the same dynamic index operand but different static
        //    offset immediates -- which is a common code pattern when accessing
        //    multiple fields in the same struct that is in linear memory --
        //    will all emit the same `index > bound` check, which we can GVN.
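        //
        //    For example, two loads of the same dynamic index at static
        //    offsets 0 and 8 both reduce to the exact same `index > bound`
        //    comparison here, so only a single bounds check survives GVN.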
        HeapStyle::Dynamic { bound_gv }
            if can_use_virtual_memory && offset_and_size <= heap.offset_guard_size =>
        {
            let bound = get_dynamic_heap_bound(builder, env, heap);
            let oob = make_compare(
                builder,
                IntCC::UnsignedGreaterThan,
                index,
                Some(0),
                bound,
                Some(0),
            );
            Reachable(explicit_check_oob_condition_and_compute_addr(
                &mut builder.cursor(),
                heap,
                env.pointer_type(),
                index,
                offset,
                access_size,
                spectre_mitigations_enabled,
                AddrPcc::dynamic(heap.memory_type, bound_gv),
                oob,
            ))
        }

        // 3. Third special case for when `offset + access_size <= min_size`.
        //
        //    We know that `bound >= min_size`, so we can do the following
        //    comparison, without fear of the right-hand side wrapping around:
        //
        //        index + offset + access_size > bound
        //    ==> index > bound - (offset + access_size)
        HeapStyle::Dynamic { bound_gv } if offset_and_size <= heap.min_size => {
            let bound = get_dynamic_heap_bound(builder, env, heap);
            let adjustment = offset_and_size as i64;
            let adjustment_value = builder.ins().iconst(env.pointer_type(), adjustment);
            if pcc {
                builder.func.dfg.facts[adjustment_value] =
                    Some(Fact::constant(pointer_bit_width, offset_and_size));
            }
            let adjusted_bound = builder.ins().isub(bound, adjustment_value);
            if pcc {
                builder.func.dfg.facts[adjusted_bound] = Some(Fact::global_value_offset(
                    pointer_bit_width,
                    bound_gv,
                    -adjustment,
                ));
            }
            let oob = make_compare(
                builder,
                IntCC::UnsignedGreaterThan,
                index,
                Some(0),
                adjusted_bound,
                Some(adjustment),
            );
            Reachable(explicit_check_oob_condition_and_compute_addr(
                &mut builder.cursor(),
                heap,
                env.pointer_type(),
                index,
                offset,
                access_size,
                spectre_mitigations_enabled,
                AddrPcc::dynamic(heap.memory_type, bound_gv),
                oob,
            ))
        }

        // 4. General case for dynamic memories:
        //
        //        index + offset + access_size > bound
        //
        //    And we have to handle the overflow case in the left-hand side.
        HeapStyle::Dynamic { bound_gv } => {
            let access_size_val = builder
                .ins()
                // Explicit cast from u64 to i64: we just want the raw
                // bits, and iconst takes an `Imm64`.
                .iconst(env.pointer_type(), offset_and_size as i64);
            if pcc {
                builder.func.dfg.facts[access_size_val] =
                    Some(Fact::constant(pointer_bit_width, offset_and_size));
            }
            let adjusted_index = builder.ins().uadd_overflow_trap(
                index,
                access_size_val,
                ir::TrapCode::HEAP_OUT_OF_BOUNDS,
            );
            if pcc {
                builder.func.dfg.facts[adjusted_index] = Some(Fact::value_offset(
                    pointer_bit_width,
                    index,
                    i64::try_from(offset_and_size).unwrap(),
                ));
            }
            let bound = get_dynamic_heap_bound(builder, env, heap);
            let oob = make_compare(
                builder,
                IntCC::UnsignedGreaterThan,
                adjusted_index,
                i64::try_from(offset_and_size).ok(),
                bound,
                Some(0),
            );
            Reachable(explicit_check_oob_condition_and_compute_addr(
                &mut builder.cursor(),
                heap,
                env.pointer_type(),
                index,
                offset,
                access_size,
                spectre_mitigations_enabled,
                AddrPcc::dynamic(heap.memory_type, bound_gv),
                oob,
            ))
        }

        // ====== Static Memories ======
        //
        // With static memories we know the size of the heap bound at compile
        // time.
        //
        // 1. First special case: trap immediately if `offset + access_size >
        //    bound`, since we will end up being out-of-bounds regardless of the
        //    given `index`.
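        //
        //    For example, with a single-page static bound of 0x1_0000, an
        //    offset of 0xffff, and a 4-byte access, `offset + access_size =
        //    0x1_0003 > 0x1_0000`, so the access is out of bounds no matter
        //    what `index` is.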
        HeapStyle::Static { bound } if offset_and_size > bound => {
            assert!(
                can_use_virtual_memory,
                "static memories require the ability to use virtual memory"
            );
            env.before_unconditionally_trapping_memory_access(builder)?;
            builder.ins().trap(ir::TrapCode::HEAP_OUT_OF_BOUNDS);
            Unreachable
        }

        // 2. Second special case for when we can completely omit explicit
        //    bounds checks for 32-bit static memories.
        //
        //    First, let's rewrite our comparison to move all of the constants
        //    to one side:
        //
        //        index + offset + access_size > bound
        //    ==> index > bound - (offset + access_size)
        //
        //    We know the subtraction on the right-hand side won't wrap because
        //    we didn't hit the first special case.
        //
        //    Additionally, we add our guard pages (if any) to the right-hand
        //    side, since we can rely on the virtual memory subsystem at runtime
        //    to catch out-of-bound accesses within the range `bound .. bound +
        //    guard_size`. So now we are dealing with
        //
        //        index > bound + guard_size - (offset + access_size)
        //
        //    Note that `bound + guard_size` cannot overflow for
        //    correctly-configured heaps, as otherwise the heap wouldn't fit in
        //    a 64-bit memory space.
        //
        //    The complement of our should-this-trap comparison expression is
        //    the should-this-not-trap comparison expression:
        //
        //        index <= bound + guard_size - (offset + access_size)
        //
        //    If we know the right-hand side is greater than or equal to
        //    `u32::MAX`, then
        //
        //        index <= u32::MAX <= bound + guard_size - (offset + access_size)
        //
        //    This expression is always true when the heap is indexed with
        //    32-bit integers because `index` cannot be larger than
        //    `u32::MAX`. This means that `index` is always either in bounds or
        //    within the guard page region, neither of which requires emitting
        //    an explicit bounds check.
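        //
        //    As a worked example: with a 4 GiB static bound, a 2 GiB guard
        //    region, and a 4-byte access at offset 0, the right-hand side is
        //    `0x1_8000_0000 - 4`, which is well above `u32::MAX`, so a 32-bit
        //    `index` can never reach past the guard region and no check needs
        //    to be emitted.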
        HeapStyle::Static { bound }
            if can_use_virtual_memory
                && heap.index_type == ir::types::I32
                && u64::from(u32::MAX) <= bound + heap.offset_guard_size - offset_and_size =>
        {
            assert!(
                can_use_virtual_memory,
                "static memories require the ability to use virtual memory"
            );
            Reachable(compute_addr(
                &mut builder.cursor(),
                heap,
                env.pointer_type(),
                index,
                offset,
                AddrPcc::static32(heap.memory_type, bound + heap.offset_guard_size),
            ))
        }

        // 3. General case for static memories.
        //
        //    We have to explicitly test whether
        //
        //        index > bound - (offset + access_size)
        //
        //    and trap if so.
        //
        //    Since we have to emit explicit bounds checks, we might as well be
        //    precise, not rely on the virtual memory subsystem at all, and not
        //    factor in the guard pages here.
        HeapStyle::Static { bound } => {
            assert!(
                can_use_virtual_memory,
                "static memories require the ability to use virtual memory"
            );
            // NB: this subtraction cannot wrap because we didn't hit the first
            // special case.
            let adjusted_bound = bound - offset_and_size;
            let adjusted_bound_value = builder
                .ins()
                .iconst(env.pointer_type(), adjusted_bound as i64);
            if pcc {
                builder.func.dfg.facts[adjusted_bound_value] =
                    Some(Fact::constant(pointer_bit_width, adjusted_bound));
            }
            let oob = make_compare(
                builder,
                IntCC::UnsignedGreaterThan,
                index,
                Some(0),
                adjusted_bound_value,
                Some(0),
            );
            Reachable(explicit_check_oob_condition_and_compute_addr(
                &mut builder.cursor(),
                heap,
                env.pointer_type(),
                index,
                offset,
                access_size,
                spectre_mitigations_enabled,
                AddrPcc::static32(heap.memory_type, bound),
                oob,
            ))
        }
    })
}

/// Get the bound of a dynamic heap as an `ir::Value`.
fn get_dynamic_heap_bound<Env>(
    builder: &mut FunctionBuilder,
    env: &mut Env,
    heap: &HeapData,
) -> ir::Value
where
    Env: FuncEnvironment + ?Sized,
{
    let enable_pcc = heap.memory_type.is_some();

    let (value, gv) = match (heap.max_size, &heap.style) {
        // The heap has a constant size, no need to actually load the
        // bound. TODO: this is currently disabled for PCC because we
        // can't easily prove that the GV load indeed results in a
        // constant (that information is lost in the CLIF). We'll want
        // to create an `iconst` GV expression kind to reify this fact
        // in the GV, then re-enable this opt. (Or, alternately,
        // compile such memories with a static-bound memtype and
        // facts.)
        (Some(max_size), HeapStyle::Dynamic { bound_gv })
            if heap.min_size == max_size && !enable_pcc =>
        {
            (
                builder.ins().iconst(env.pointer_type(), max_size as i64),
                *bound_gv,
            )
        }

        // Load the heap bound from its global variable.
        (_, HeapStyle::Dynamic { bound_gv }) => (
            builder.ins().global_value(env.pointer_type(), *bound_gv),
            *bound_gv,
        ),

        (_, HeapStyle::Static { .. }) => unreachable!("not a dynamic heap"),
    };

    // If proof-carrying code is enabled, apply a fact to the range to
    // tie it to the GV.
    if enable_pcc {
        builder.func.dfg.facts[value] = Some(Fact::global_value(
            u16::try_from(env.pointer_type().bits()).unwrap(),
            gv,
        ));
    }

    value
}

fn cast_index_to_pointer_ty(
    index: ir::Value,
    index_ty: ir::Type,
    pointer_ty: ir::Type,
    pcc: bool,
    pos: &mut FuncCursor,
) -> ir::Value {
    if index_ty == pointer_ty {
        return index;
    }
    // Note that using 64-bit heaps on a 32-bit host is not currently
    // supported; it would require at least a bounds check here to ensure that
    // the truncation from 64 to 32 bits doesn't lose any upper bits. For now,
    // though, we're mostly interested in the 32-bit-heaps-on-64-bit-hosts
    // cast.
    assert!(index_ty.bits() < pointer_ty.bits());

    // Convert `index` to `addr_ty`.
    let extended_index = pos.ins().uextend(pointer_ty, index);

    // Add a range fact on the extended value.
    if pcc {
        pos.func.dfg.facts[extended_index] = Some(Fact::max_range_for_width_extended(
            u16::try_from(index_ty.bits()).unwrap(),
            u16::try_from(pointer_ty.bits()).unwrap(),
        ));
    }

    // Add a debug value-label alias so that debuginfo can name the extended
    // value as the address.
    let loc = pos.srcloc();
    let loc = RelSourceLoc::from_base_offset(pos.func.params.base_srcloc(), loc);
    pos.func
        .stencil
        .dfg
        .add_value_label_alias(extended_index, loc, index);

    extended_index
}

/// Which facts do we want to emit for proof-carrying code, if any, on
/// address computations?
#[derive(Clone, Copy, Debug)]
enum AddrPcc {
    /// A 32-bit static memory with the given size.
    Static32(ir::MemoryType, u64),
    /// Dynamic bounds-check, with actual memory size (the `GlobalValue`)
    /// expressed symbolically.
    Dynamic(ir::MemoryType, ir::GlobalValue),
}
impl AddrPcc {
    fn static32(memory_type: Option<ir::MemoryType>, size: u64) -> Option<Self> {
        memory_type.map(|ty| Self::Static32(ty, size))
    }
    fn dynamic(memory_type: Option<ir::MemoryType>, bound: ir::GlobalValue) -> Option<Self> {
        memory_type.map(|ty| Self::Dynamic(ty, bound))
    }
}

/// Emit explicit checks on the given out-of-bounds condition for the Wasm
/// address and return the native address.
///
/// This function deduplicates explicit bounds checks and Spectre mitigations
/// that inherently also implement bounds checking.
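///
/// As an illustrative sketch (value numbers invented for exposition), when
/// Spectre mitigations are enabled the emitted CLIF has roughly this shape:
///
/// ```text
/// v4 = iadd v1, v2                      ; heap_base + index (`compute_addr`)
/// v5 = iconst.i64 0
/// v6 = select_spectre_guard v3, v5, v4  ; v3 is `oob_condition`
/// ```
///
/// so that a mis-speculated bounds check dereferences address zero rather
/// than an attacker-controlled address.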
#[allow(clippy::too_many_arguments)]
fn explicit_check_oob_condition_and_compute_addr(
    pos: &mut FuncCursor,
    heap: &HeapData,
    addr_ty: ir::Type,
    index: ir::Value,
    offset: u32,
    access_size: u8,
    // Whether Spectre mitigations are enabled for heap accesses.
    spectre_mitigations_enabled: bool,
    // Whether we're emitting PCC facts.
    pcc: Option<AddrPcc>,
    // The `i8` boolean value that is non-zero when the heap access is out of
    // bounds (and therefore we should trap) and is zero when the heap access is
    // in bounds (and therefore we can proceed).
    oob_condition: ir::Value,
) -> ir::Value {
    if !spectre_mitigations_enabled {
        pos.ins()
            .trapnz(oob_condition, ir::TrapCode::HEAP_OUT_OF_BOUNDS);
    }

    let mut addr = compute_addr(pos, heap, addr_ty, index, offset, pcc);

    if spectre_mitigations_enabled {
        let null = pos.ins().iconst(addr_ty, 0);
        addr = pos.ins().select_spectre_guard(oob_condition, null, addr);

        match pcc {
            None => {}
            Some(AddrPcc::Static32(ty, size)) => {
                pos.func.dfg.facts[null] =
                    Some(Fact::constant(u16::try_from(addr_ty.bits()).unwrap(), 0));
                pos.func.dfg.facts[addr] = Some(Fact::Mem {
                    ty,
                    min_offset: 0,
                    max_offset: size.checked_sub(u64::from(access_size)).unwrap(),
                    nullable: true,
                });
            }
            Some(AddrPcc::Dynamic(ty, gv)) => {
                pos.func.dfg.facts[null] =
                    Some(Fact::constant(u16::try_from(addr_ty.bits()).unwrap(), 0));
                pos.func.dfg.facts[addr] = Some(Fact::DynamicMem {
                    ty,
                    min: Expr::constant(0),
                    max: Expr::offset(
                        &Expr::global_value(gv),
                        i64::try_from(heap.offset_guard_size)
                            .unwrap()
                            .checked_sub(i64::from(access_size))
                            .unwrap(),
                    )
                    .unwrap(),
                    nullable: true,
                });
            }
        }
    }

    addr
}

/// Emit code for the native address computation of a Wasm address,
/// without any bounds checks or overflow checks.
///
/// It is the caller's responsibility to ensure that any necessary bounds and
/// overflow checks are emitted, and that the resulting address is never used
/// unless they succeed.
fn compute_addr(
    pos: &mut FuncCursor,
    heap: &HeapData,
    addr_ty: ir::Type,
    index: ir::Value,
    offset: u32,
    pcc: Option<AddrPcc>,
) -> ir::Value {
    debug_assert_eq!(pos.func.dfg.value_type(index), addr_ty);

    let heap_base = pos.ins().global_value(addr_ty, heap.base);

    match pcc {
        None => {}
        Some(AddrPcc::Static32(ty, _size)) => {
            pos.func.dfg.facts[heap_base] = Some(Fact::Mem {
                ty,
                min_offset: 0,
                max_offset: 0,
                nullable: false,
            });
        }
        Some(AddrPcc::Dynamic(ty, _limit)) => {
            pos.func.dfg.facts[heap_base] = Some(Fact::dynamic_base_ptr(ty));
        }
    }

    let base_and_index = pos.ins().iadd(heap_base, index);

    match pcc {
        None => {}
        Some(AddrPcc::Static32(ty, _) | AddrPcc::Dynamic(ty, _)) => {
            if let Some(idx) = pos.func.dfg.facts[index]
                .as_ref()
                .and_then(|f| f.as_symbol())
                .cloned()
            {
                pos.func.dfg.facts[base_and_index] = Some(Fact::DynamicMem {
                    ty,
                    min: idx.clone(),
                    max: idx,
                    nullable: false,
                });
            } else {
                pos.func.dfg.facts[base_and_index] = Some(Fact::Mem {
                    ty,
                    min_offset: 0,
                    max_offset: u64::from(u32::MAX),
                    nullable: false,
                });
            }
        }
    }

    if offset == 0 {
        base_and_index
    } else {
        // NB: The addition of the offset immediate must happen *before* the
        // `select_spectre_guard`, if any. If it happens after, then we
        // potentially are letting speculative execution read the whole first
        // 4GiB of memory.
        let offset_val = pos.ins().iconst(addr_ty, i64::from(offset));

        if pcc.is_some() {
            pos.func.dfg.facts[offset_val] = Some(Fact::constant(
                u16::try_from(addr_ty.bits()).unwrap(),
                u64::from(offset),
            ));
        }

        let result = pos.ins().iadd(base_and_index, offset_val);

        match pcc {
            None => {}
            Some(AddrPcc::Static32(ty, _) | AddrPcc::Dynamic(ty, _)) => {
                if let Some(idx) = pos.func.dfg.facts[index]
                    .as_ref()
                    .and_then(|f| f.as_symbol())
                {
                    pos.func.dfg.facts[result] = Some(Fact::DynamicMem {
                        ty,
                        min: idx.clone(),
                        // Safety: adding an offset to an expression with
                        // zero offset -- add cannot wrap, so `unwrap()`
                        // cannot fail.
                        max: Expr::offset(idx, i64::from(offset)).unwrap(),
                        nullable: false,
                    });
                } else {
                    pos.func.dfg.facts[result] = Some(Fact::Mem {
                        ty,
                        min_offset: u64::from(offset),
                        // Safety: can't overflow -- two u32s summed in a
                        // 64-bit add. TODO: when memory64 is supported here,
                        // `u32::MAX` is no longer true, and we'll need to
                        // handle overflow here.
                        max_offset: u64::from(u32::MAX) + u64::from(offset),
                        nullable: false,
                    });
                }
            }
        }
        result
    }
}

#[inline]
fn offset_plus_size(offset: u32, size: u8) -> u64 {
    // Cannot overflow because we are widening to `u64`.
    offset as u64 + size as u64
}
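
// A minimal sanity-check sketch (illustrative addition, not part of the
// upstream test suite) exercising `offset_plus_size` at the extremes of its
// input types, confirming that the widened addition cannot wrap.
#[cfg(test)]
mod tests {
    use super::offset_plus_size;

    #[test]
    fn offset_plus_size_never_wraps() {
        assert_eq!(offset_plus_size(0, 0), 0);
        assert_eq!(offset_plus_size(u32::MAX, 0), u64::from(u32::MAX));
        assert_eq!(
            offset_plus_size(u32::MAX, u8::MAX),
            u64::from(u32::MAX) + u64::from(u8::MAX)
        );
    }
}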