miri/alloc_addresses/mod.rs
//! This module is responsible for managing the absolute addresses that allocations are located at,
//! and for casting between pointers and integers based on those addresses.

mod reuse_pool;

use std::cell::RefCell;
use std::cmp::max;

use rand::Rng;
use rustc_abi::{Align, Size};
use rustc_data_structures::fx::{FxHashMap, FxHashSet};

use self::reuse_pool::ReusePool;
use crate::concurrency::VClock;
use crate::*;

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum ProvenanceMode {
    /// We support `expose_provenance`/`with_exposed_provenance` via "wildcard" provenance.
    /// However, we warn on `with_exposed_provenance` to alert the user of the precision loss.
    Default,
    /// Like `Default`, but without the warning.
    Permissive,
    /// We error on `with_exposed_provenance`, ensuring no precision loss.
    Strict,
}
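
// Note: `Permissive` and `Strict` are selected via Miri's `-Zmiri-permissive-provenance` and
// `-Zmiri-strict-provenance` flags, respectively; `Default` applies when neither flag is passed.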

pub type GlobalState = RefCell<GlobalStateInner>;

#[derive(Debug)]
pub struct GlobalStateInner {
    /// This is used as a map between the address of each allocation and its `AllocId`. It is always
    /// sorted by address. We cannot use a `HashMap` since we can be given an address that is offset
    /// from the base address, and we need to find the `AllocId` it belongs to. This is not the
    /// *full* inverse of `base_addr`; dead allocations have been removed.
    int_to_ptr_map: Vec<(u64, AllocId)>,
    /// The base address for each allocation. We cannot put that into
    /// `AllocExtra` because function pointers also have a base address, and
    /// they do not have an `AllocExtra`.
    /// This is the inverse of `int_to_ptr_map`.
    base_addr: FxHashMap<AllocId, u64>,
    /// Temporarily store prepared memory space for global allocations the first time their memory
    /// address is required. This is used to ensure that the memory is allocated before Miri assigns
    /// it an internal address, which is important for matching the internal address to the machine
    /// address so FFI can read from pointers.
    prepared_alloc_bytes: FxHashMap<AllocId, MiriAllocBytes>,
    /// A pool of addresses we can reuse for future allocations.
    reuse: ReusePool,
    /// Whether an allocation has been exposed or not. This cannot be put
    /// into `AllocExtra` for the same reason as `base_addr`.
    exposed: FxHashSet<AllocId>,
    /// This is used as a memory address when a new pointer is cast to an integer. It
    /// is always larger than any address that was previously made part of a block.
    next_base_addr: u64,
    /// The provenance to use for int2ptr casts.
    provenance_mode: ProvenanceMode,
}

impl VisitProvenance for GlobalStateInner {
    fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {
        let GlobalStateInner {
            int_to_ptr_map: _,
            base_addr: _,
            prepared_alloc_bytes: _,
            reuse: _,
            exposed: _,
            next_base_addr: _,
            provenance_mode: _,
        } = self;
        // Though base_addr, int_to_ptr_map, and exposed contain AllocIds, we do not want to visit them.
        // int_to_ptr_map and exposed must contain only live allocations, and those
        // are never garbage collected.
        // base_addr is only relevant if we have a pointer to an AllocId and need to look up its
        // base address; so if an AllocId is not reachable from somewhere else we can remove it
        // here.
    }
}

impl GlobalStateInner {
    pub fn new(config: &MiriConfig, stack_addr: u64) -> Self {
        GlobalStateInner {
            int_to_ptr_map: Vec::default(),
            base_addr: FxHashMap::default(),
            prepared_alloc_bytes: FxHashMap::default(),
            reuse: ReusePool::new(config),
            exposed: FxHashSet::default(),
            next_base_addr: stack_addr,
            provenance_mode: config.provenance_mode,
        }
    }

    pub fn remove_unreachable_allocs(&mut self, allocs: &LiveAllocs<'_, '_>) {
        // `exposed` and `int_to_ptr_map` are cleared immediately when an allocation
        // is freed, so `base_addr` is the only one we have to clean up based on the GC.
        self.base_addr.retain(|id, _| allocs.is_live(*id));
    }
}

/// Shifts `addr` to make it aligned with `align` by rounding `addr` to the smallest multiple
/// of `align` that is greater than or equal to `addr`.
fn align_addr(addr: u64, align: u64) -> u64 {
    match addr % align {
        0 => addr,
        rem => addr.strict_add(align) - rem,
    }
}

impl<'tcx> EvalContextExtPriv<'tcx> for crate::MiriInterpCx<'tcx> {}
trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
    /// Returns the exposed `AllocId` that corresponds to the specified addr,
    /// or `None` if the addr is out of bounds.
    fn alloc_id_from_addr(&self, addr: u64, size: i64) -> Option<AllocId> {
        let this = self.eval_context_ref();
        let global_state = this.machine.alloc_addresses.borrow();
        assert!(global_state.provenance_mode != ProvenanceMode::Strict);

        // We always search the allocation to the right of this address. So if the size is strictly
        // negative, we have to search for `addr-1` instead.
        let addr = if size >= 0 { addr } else { addr.saturating_sub(1) };
        let pos = global_state.int_to_ptr_map.binary_search_by_key(&addr, |(addr, _)| *addr);

        // Determine the in-bounds provenance for this pointer.
        let alloc_id = match pos {
            Ok(pos) => Some(global_state.int_to_ptr_map[pos].1),
            Err(0) => None,
            Err(pos) => {
                // This is the largest of the addresses smaller than `addr`,
                // i.e. the greatest lower bound (glb).
                let (glb, alloc_id) = global_state.int_to_ptr_map[pos - 1];
                // This never overflows because `addr >= glb`.
                let offset = addr - glb;
                // We require this to be strictly in-bounds of the allocation. This arm is only
                // entered for addresses that are not the base address, so even zero-sized
                // allocations will get recognized at their base address -- but all other
                // allocations will *not* be recognized at their "end" address.
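                // (Illustrative example: an allocation with base address 0x100 and size 8 is
                // recognized for addresses 0x100..=0x107, but not at its one-past-the-end
                // address 0x108.)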
                let size = this.get_alloc_info(alloc_id).size;
                if offset < size.bytes() { Some(alloc_id) } else { None }
            }
        }?;

        // We only use this provenance if it has been exposed.
        if global_state.exposed.contains(&alloc_id) {
            // This must still be live, since we remove allocations from `int_to_ptr_map` when they get freed.
            debug_assert!(this.is_alloc_live(alloc_id));
            Some(alloc_id)
        } else {
            None
        }
    }

    fn addr_from_alloc_id_uncached(
        &self,
        global_state: &mut GlobalStateInner,
        alloc_id: AllocId,
        memory_kind: MemoryKind,
    ) -> InterpResult<'tcx, u64> {
        let this = self.eval_context_ref();
        let mut rng = this.machine.rng.borrow_mut();
        let info = this.get_alloc_info(alloc_id);
        // This is either called immediately after allocation (and then cached), or when
        // adjusting `tcx` pointers (which never get freed). So assert that we are looking
        // at a live allocation. This also ensures that we never re-assign an address to an
        // allocation that previously had an address, but then was freed and the address
        // information was removed.
        assert!(!matches!(info.kind, AllocKind::Dead));

        // This allocation does not have a base address yet, pick or reuse one.
        if this.machine.native_lib.is_some() {
            // In native lib mode, we use the "real" address of the bytes for this allocation.
            // This ensures the interpreted program and native code have the same view of memory.
            let base_ptr = match info.kind {
                AllocKind::LiveData => {
                    if memory_kind == MiriMemoryKind::Global.into() {
                        // For new global allocations, we always pre-allocate the memory to be able to use the machine address directly.
                        let prepared_bytes = MiriAllocBytes::zeroed(info.size, info.align)
                            .unwrap_or_else(|| {
                                panic!("Miri ran out of memory: cannot create allocation of {size:?} bytes", size = info.size)
                            });
                        let ptr = prepared_bytes.as_ptr();
                        // Store prepared allocation to be picked up for use later.
                        global_state
                            .prepared_alloc_bytes
                            .try_insert(alloc_id, prepared_bytes)
                            .unwrap();
                        ptr
                    } else {
                        // Non-global allocations are already in memory at this point so
                        // we can just get a pointer to where their data is stored.
                        this.get_alloc_bytes_unchecked_raw(alloc_id)?
                    }
                }
                AllocKind::Function | AllocKind::VTable => {
                    // Allocate some dummy memory to get a unique address for this function/vtable.
                    let alloc_bytes =
                        MiriAllocBytes::from_bytes(&[0u8; 1], Align::from_bytes(1).unwrap());
                    let ptr = alloc_bytes.as_ptr();
                    // Leak the underlying memory to ensure it remains unique.
                    std::mem::forget(alloc_bytes);
                    ptr
                }
                AllocKind::Dead => unreachable!(),
            };
            // We don't have to expose this pointer yet; we do that in `prepare_for_native_call`.
            return interp_ok(base_ptr.addr().try_into().unwrap());
        }
        // We are not in native lib mode, so we control the addresses ourselves.
        if let Some((reuse_addr, clock)) = global_state.reuse.take_addr(
            &mut *rng,
            info.size,
            info.align,
            memory_kind,
            this.active_thread(),
        ) {
            if let Some(clock) = clock {
                this.acquire_clock(&clock);
            }
            interp_ok(reuse_addr)
        } else {
            // We have to pick a fresh address.
            // Leave some space to the previous allocation, to give it some chance to be less aligned.
            // We ensure that `(global_state.next_base_addr + slack) % 16` is uniformly distributed.
            let slack = rng.random_range(0..16);
            // From next_base_addr + slack, round up to adjust for alignment.
            let base_addr = global_state
                .next_base_addr
                .checked_add(slack)
                .ok_or_else(|| err_exhaust!(AddressSpaceFull))?;
            let base_addr = align_addr(base_addr, info.align.bytes());
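            // (Worked example with made-up numbers: if `next_base_addr` is 0x100 and the RNG
            // picks `slack = 5`, a 16-byte-aligned allocation gets the candidate address
            // 0x105, which `align_addr` rounds up to 0x110.)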

            // Remember next base address. If this allocation is zero-sized, leave a gap of at
            // least 1 to avoid two allocations having the same base address. (The logic in
            // `alloc_id_from_addr` assumes unique addresses, and different function/vtable pointers
            // need to be distinguishable!)
            global_state.next_base_addr = base_addr
                .checked_add(max(info.size.bytes(), 1))
                .ok_or_else(|| err_exhaust!(AddressSpaceFull))?;
            // Even if `Size` didn't overflow, we might still have filled up the address space.
            if global_state.next_base_addr > this.target_usize_max() {
                throw_exhaust!(AddressSpaceFull);
            }

            interp_ok(base_addr)
        }
    }

    fn addr_from_alloc_id(
        &self,
        alloc_id: AllocId,
        memory_kind: MemoryKind,
    ) -> InterpResult<'tcx, u64> {
        let this = self.eval_context_ref();
        let mut global_state = this.machine.alloc_addresses.borrow_mut();
        let global_state = &mut *global_state;

        match global_state.base_addr.get(&alloc_id) {
            Some(&addr) => interp_ok(addr),
            None => {
                // First time we're looking for the absolute address of this allocation.
                let base_addr =
                    self.addr_from_alloc_id_uncached(global_state, alloc_id, memory_kind)?;
                trace!("Assigning base address {:#x} to allocation {:?}", base_addr, alloc_id);

                // Store address in cache.
                global_state.base_addr.try_insert(alloc_id, base_addr).unwrap();

                // Also maintain the opposite mapping in `int_to_ptr_map`, ensuring we keep it sorted.
                // We have a fast-path for the common case that this address is bigger than all previous ones.
                let pos = if global_state
                    .int_to_ptr_map
                    .last()
                    .is_some_and(|(last_addr, _)| *last_addr < base_addr)
                {
                    global_state.int_to_ptr_map.len()
                } else {
                    global_state
                        .int_to_ptr_map
                        .binary_search_by_key(&base_addr, |(addr, _)| *addr)
                        .unwrap_err()
                };
                global_state.int_to_ptr_map.insert(pos, (base_addr, alloc_id));

                interp_ok(base_addr)
            }
        }
    }
}

impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
    fn expose_provenance(&self, provenance: Provenance) -> InterpResult<'tcx> {
        let this = self.eval_context_ref();
        let mut global_state = this.machine.alloc_addresses.borrow_mut();

        let (alloc_id, tag) = match provenance {
            Provenance::Concrete { alloc_id, tag } => (alloc_id, tag),
            Provenance::Wildcard => {
                // No need to do anything for wildcard pointers as
                // their provenance has already been exposed.
                return interp_ok(());
            }
        };

        // In strict mode, we don't need this, so we can save some cycles by not tracking it.
        if global_state.provenance_mode == ProvenanceMode::Strict {
            return interp_ok(());
        }
        // Exposing a dead alloc is a no-op, because it's not possible to get a dead allocation
        // via int2ptr.
        if !this.is_alloc_live(alloc_id) {
            return interp_ok(());
        }
        trace!("Exposing allocation id {alloc_id:?}");
        global_state.exposed.insert(alloc_id);
        // Release the global state before we call `expose_tag`, which may call `get_alloc_info_extra`,
        // which may need access to the global state.
        drop(global_state);
        if this.machine.borrow_tracker.is_some() {
            this.expose_tag(alloc_id, tag)?;
        }
        interp_ok(())
    }

    fn ptr_from_addr_cast(&self, addr: u64) -> InterpResult<'tcx, Pointer> {
        trace!("Casting {:#x} to a pointer", addr);

        let this = self.eval_context_ref();
        let global_state = this.machine.alloc_addresses.borrow();

        // Potentially emit a warning.
        match global_state.provenance_mode {
            ProvenanceMode::Default => {
                // The first time this happens at a particular location, print a warning.
                let mut int2ptr_warned = this.machine.int2ptr_warned.borrow_mut();
                let first = int2ptr_warned.is_empty();
                if int2ptr_warned.insert(this.cur_span()) {
                    // Newly inserted, so this is the first time we see this span.
                    this.emit_diagnostic(NonHaltingDiagnostic::Int2Ptr { details: first });
                }
            }
            ProvenanceMode::Strict => {
                throw_machine_stop!(TerminationInfo::Int2PtrWithStrictProvenance);
            }
            ProvenanceMode::Permissive => {}
        }

        // We do *not* look up the `AllocId` here! This is a `ptr as usize` cast, and it is
        // completely legal to do a cast and then `wrapping_offset` to another allocation and only
        // *then* do a memory access. So the allocation that the pointer happens to point to on a
        // cast is fairly irrelevant. Instead we generate this as a "wildcard" pointer, such that
        // *every time the pointer is used*, we do an `AllocId` lookup to find the (exposed)
        // allocation it might be referencing.
        interp_ok(Pointer::new(Some(Provenance::Wildcard), Size::from_bytes(addr)))
    }
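
    // (For orientation, a sketch of the user-level round trip that `expose_provenance` and
    // `ptr_from_addr_cast` model, using the standard exposed-provenance APIs:
    //
    //     let addr = ptr.expose_provenance();                    // handled by `expose_provenance`
    //     let q = std::ptr::with_exposed_provenance::<u8>(addr); // handled by `ptr_from_addr_cast`
    //
    // `q` carries wildcard provenance, so every access through it goes through
    // `alloc_id_from_addr` to find an exposed allocation that fits.)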

    /// Convert a relative (tcx) pointer to a Miri pointer.
    fn adjust_alloc_root_pointer(
        &self,
        ptr: interpret::Pointer<CtfeProvenance>,
        tag: BorTag,
        kind: MemoryKind,
    ) -> InterpResult<'tcx, interpret::Pointer<Provenance>> {
        let this = self.eval_context_ref();

        let (prov, offset) = ptr.into_parts(); // offset is relative (AllocId provenance)
        let alloc_id = prov.alloc_id();

        // Get a pointer to the beginning of this allocation.
        let base_addr = this.addr_from_alloc_id(alloc_id, kind)?;
        let base_ptr = interpret::Pointer::new(
            Provenance::Concrete { alloc_id, tag },
            Size::from_bytes(base_addr),
        );
        // Add offset with the right kind of pointer-overflowing arithmetic.
        interp_ok(base_ptr.wrapping_offset(offset, this))
    }
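
    // (Example: a `tcx` pointer 4 bytes into some global has `AllocId`-based provenance and
    // relative offset 4; after adjustment it is a Miri pointer with concrete provenance whose
    // absolute address is the allocation's `base_addr + 4`.)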

    /// Returns some prepared `MiriAllocBytes`, either because `addr_from_alloc_id` reserved
    /// memory space in the past, or by doing the pre-allocation right upon being called.
    fn get_global_alloc_bytes(
        &self,
        id: AllocId,
        bytes: &[u8],
        align: Align,
    ) -> InterpResult<'tcx, MiriAllocBytes> {
        let this = self.eval_context_ref();
        assert!(this.tcx.try_get_global_alloc(id).is_some());
        if this.machine.native_lib.is_some() {
            // In native lib mode, MiriAllocBytes for global allocations are handled via `prepared_alloc_bytes`.
            // This additional call ensures that some `MiriAllocBytes` are always prepared, just in case
            // this function gets called before the first time `addr_from_alloc_id` gets called.
            this.addr_from_alloc_id(id, MiriMemoryKind::Global.into())?;
            // The memory we need here will have already been allocated during an earlier call to
            // `addr_from_alloc_id` for this allocation. So don't create a new `MiriAllocBytes` here, instead
            // fetch the previously prepared bytes from `prepared_alloc_bytes`.
            let mut global_state = this.machine.alloc_addresses.borrow_mut();
            let mut prepared_alloc_bytes = global_state
                .prepared_alloc_bytes
                .remove(&id)
                .unwrap_or_else(|| panic!("alloc bytes for {id:?} have not been prepared"));
            // Sanity-check that the prepared allocation has the right size and alignment.
            assert!(prepared_alloc_bytes.as_ptr().is_aligned_to(align.bytes_usize()));
            assert_eq!(prepared_alloc_bytes.len(), bytes.len());
            // Copy allocation contents into prepared memory.
            prepared_alloc_bytes.copy_from_slice(bytes);
            interp_ok(prepared_alloc_bytes)
        } else {
            interp_ok(MiriAllocBytes::from_bytes(std::borrow::Cow::Borrowed(bytes), align))
        }
    }

    /// When a pointer is used for a memory access, this computes which allocation the access
    /// is going to, and the offset within that allocation.
    fn ptr_get_alloc(
        &self,
        ptr: interpret::Pointer<Provenance>,
        size: i64,
    ) -> Option<(AllocId, Size)> {
        let this = self.eval_context_ref();

        let (tag, addr) = ptr.into_parts(); // addr is absolute (Tag provenance)

        let alloc_id = if let Provenance::Concrete { alloc_id, .. } = tag {
            alloc_id
        } else {
            // A wildcard pointer.
            this.alloc_id_from_addr(addr.bytes(), size)?
        };

        // This cannot fail: since we already have a pointer with that provenance,
        // `adjust_alloc_root_pointer` must have been called in the past, so we can just look up
        // the address in the map.
        let base_addr = *this.machine.alloc_addresses.borrow().base_addr.get(&alloc_id).unwrap();

        // Wrapping "addr - base_addr".
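        // (The wrapping subtraction matters for pointers that were `wrapping_offset` to below
        // their allocation's base address: `addr - base_addr` then underflows and yields a huge
        // offset, which later bounds checks will reject as out-of-bounds.)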
        let rel_offset = this.truncate_to_target_usize(addr.bytes().wrapping_sub(base_addr));
        Some((alloc_id, Size::from_bytes(rel_offset)))
    }

    /// Prepare all exposed memory for a native call.
    /// This overapproximates the modifications which external code might make to memory:
    /// We set all reachable allocations as initialized, mark all reachable provenances as exposed,
    /// and overwrite them with `Provenance::Wildcard`.
    fn prepare_exposed_for_native_call(&mut self) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        // We need to make a deep copy of this list, but it's fine; it also serves as scratch space
        // for the search within `prepare_for_native_call`.
        let exposed: Vec<AllocId> =
            this.machine.alloc_addresses.get_mut().exposed.iter().copied().collect();
        this.prepare_for_native_call(exposed)
    }
}

impl<'tcx> MiriMachine<'tcx> {
    pub fn free_alloc_id(&mut self, dead_id: AllocId, size: Size, align: Align, kind: MemoryKind) {
        let global_state = self.alloc_addresses.get_mut();
        let rng = self.rng.get_mut();

        // We can *not* remove this from `base_addr`, since the interpreter design requires that we
        // be able to retrieve an AllocId + offset for any memory access *before* we check if the
        // access is valid. Specifically, `ptr_get_alloc` is called on each attempt at a memory
        // access to determine the allocation ID and offset -- and there can still be pointers with
        // `dead_id` that one can attempt to use for a memory access. `ptr_get_alloc` may return
        // `None` only if the pointer truly has no provenance (this ensures consistent error
        // messages).
        // However, we *can* remove it from `int_to_ptr_map`, since any wildcard pointers that exist
        // can no longer actually be accessing that address. This ensures `alloc_id_from_addr` never
        // returns a dead allocation.
        // To avoid a linear scan we first look up the address in `base_addr`, and then find it in
        // `int_to_ptr_map`.
        let addr = *global_state.base_addr.get(&dead_id).unwrap();
        let pos =
            global_state.int_to_ptr_map.binary_search_by_key(&addr, |(addr, _)| *addr).unwrap();
        let removed = global_state.int_to_ptr_map.remove(pos);
        assert_eq!(removed, (addr, dead_id)); // double-check that we removed the right thing
        // We can also remove it from `exposed`, since this allocation can anyway not be returned by
        // `alloc_id_from_addr` any more.
        global_state.exposed.remove(&dead_id);
        // Also remember this address for future reuse.
        let thread = self.threads.active_thread();
        global_state.reuse.add_addr(rng, addr, size, align, kind, thread, || {
            if let Some(data_race) = &self.data_race {
                data_race.release_clock(&self.threads, |clock| clock.clone())
            } else {
                VClock::default()
            }
        })
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_align_addr() {
        assert_eq!(align_addr(37, 4), 40);
        assert_eq!(align_addr(44, 4), 44);
    }
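
    // A few extra edge cases for `align_addr`, added as sanity checks against its definition
    // above: aligned inputs are unchanged, alignment 1 never moves an address, and other
    // addresses round up to the next multiple of `align`.
    #[test]
    fn test_align_addr_edge_cases() {
        assert_eq!(align_addr(0, 8), 0);
        assert_eq!(align_addr(1, 8), 8);
        assert_eq!(align_addr(7, 1), 7);
        assert_eq!(align_addr(16, 16), 16);
        assert_eq!(align_addr(17, 16), 32);
    }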
}