rustc_const_eval/interpret/memory.rs

1//! The memory subsystem.
2//!
3//! Generally, we use `Pointer` to denote memory addresses. However, some operations
4//! have a "size"-like parameter, and they take `Scalar` for the address because
5//! if the size is 0, then the pointer can also be a (properly aligned, non-null)
6//! integer. It is crucial that these operations call `check_align` *before*
7//! short-circuiting the empty case!
8
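// Editorial sketch (not in the original source): the convention described in the module
// docs above, written out as a hypothetical helper. `do_zst_capable_access` is an invented
// name; `check_ptr_align` and `check_ptr_access` are the methods defined later in this file.
//
//     fn do_zst_capable_access<'tcx, M: Machine<'tcx>>(
//         ecx: &InterpCx<'tcx, M>,
//         ptr: Pointer<Option<M::Provenance>>,
//         size: Size,
//         align: Align,
//     ) -> InterpResult<'tcx> {
//         // Check alignment *before* short-circuiting the empty case.
//         ecx.check_ptr_align(ptr, align)?;
//         if size.bytes() == 0 {
//             // A zero-sized access may use a (properly aligned, non-null) integer address.
//             return interp_ok(());
//         }
//         ecx.check_ptr_access(ptr, size, CheckInAllocMsg::MemoryAccess)
//     }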
9use std::assert_matches::assert_matches;
10use std::borrow::{Borrow, Cow};
11use std::cell::Cell;
12use std::collections::VecDeque;
13use std::{fmt, ptr};
14
15use rustc_abi::{Align, HasDataLayout, Size};
16use rustc_ast::Mutability;
17use rustc_data_structures::fx::{FxHashSet, FxIndexMap};
18use rustc_middle::mir::display_allocation;
19use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
20use rustc_middle::{bug, throw_ub_format};
21use tracing::{debug, instrument, trace};
22
23use super::{
24    AllocBytes, AllocId, AllocInit, AllocMap, AllocRange, Allocation, CheckAlignMsg,
25    CheckInAllocMsg, CtfeProvenance, GlobalAlloc, InterpCx, InterpResult, Machine, MayLeak,
26    Misalignment, Pointer, PointerArithmetic, Provenance, Scalar, alloc_range, err_ub,
27    err_ub_custom, interp_ok, throw_ub, throw_ub_custom, throw_unsup, throw_unsup_format,
28};
29use crate::const_eval::ConstEvalErrKind;
30use crate::fluent_generated as fluent;
31
32#[derive(Debug, PartialEq, Copy, Clone)]
33pub enum MemoryKind<T> {
34    /// Stack memory. Error if deallocated except during a stack pop.
35    Stack,
36    /// Memory allocated by `caller_location` intrinsic. Error if ever deallocated.
37    CallerLocation,
38    /// Additional memory kinds a machine wishes to distinguish from the builtin ones.
39    Machine(T),
40}
41
42impl<T: MayLeak> MayLeak for MemoryKind<T> {
43    #[inline]
44    fn may_leak(self) -> bool {
45        match self {
46            MemoryKind::Stack => false,
47            MemoryKind::CallerLocation => true,
48            MemoryKind::Machine(k) => k.may_leak(),
49        }
50    }
51}
52
53impl<T: fmt::Display> fmt::Display for MemoryKind<T> {
54    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
55        match self {
56            MemoryKind::Stack => write!(f, "stack variable"),
57            MemoryKind::CallerLocation => write!(f, "caller location"),
58            MemoryKind::Machine(m) => write!(f, "{m}"),
59        }
60    }
61}
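// Editorial sketch (not in the original source): how a machine might plug its own kinds
// into `MemoryKind::Machine`. `MyMachineKind` and its variants are invented examples.
//
//     #[derive(Debug, PartialEq, Copy, Clone)]
//     enum MyMachineKind {
//         Heap,
//         Mmap,
//     }
//
//     impl MayLeak for MyMachineKind {
//         fn may_leak(self) -> bool {
//             // Leaking heap memory would be reported; leaked mmaps are tolerated here.
//             matches!(self, MyMachineKind::Mmap)
//         }
//     }
//
//     // Such a kind is then used as `MemoryKind::Machine(MyMachineKind::Heap)`.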
62
63/// The return value of `get_alloc_info` indicates the "kind" of the allocation.
64#[derive(Copy, Clone, PartialEq, Debug)]
65pub enum AllocKind {
66    /// A regular live data allocation.
67    LiveData,
68    /// A function allocation (that fn ptrs point to).
69    Function,
70    /// A (symbolic) vtable allocation.
71    VTable,
72    /// A dead allocation.
73    Dead,
74}
75
76/// Metadata about an `AllocId`.
77#[derive(Copy, Clone, PartialEq, Debug)]
78pub struct AllocInfo {
79    pub size: Size,
80    pub align: Align,
81    pub kind: AllocKind,
82    pub mutbl: Mutability,
83}
84
85impl AllocInfo {
86    fn new(size: Size, align: Align, kind: AllocKind, mutbl: Mutability) -> Self {
87        Self { size, align, kind, mutbl }
88    }
89}
90
91/// The value of a function pointer.
92#[derive(Debug, Copy, Clone)]
93pub enum FnVal<'tcx, Other> {
94    Instance(Instance<'tcx>),
95    Other(Other),
96}
97
98impl<'tcx, Other> FnVal<'tcx, Other> {
99    pub fn as_instance(self) -> InterpResult<'tcx, Instance<'tcx>> {
100        match self {
101            FnVal::Instance(instance) => interp_ok(instance),
102            FnVal::Other(_) => {
103                throw_unsup_format!("'foreign' function pointers are not supported in this context")
104            }
105        }
106    }
107}
108
109// `Memory` has to depend on the `Machine` because some of its operations
110// (e.g., `get`) call a `Machine` hook.
111pub struct Memory<'tcx, M: Machine<'tcx>> {
112    /// Allocations local to this instance of the interpreter. The kind
113    /// helps ensure that the same mechanism is used for allocation and
114    /// deallocation. When an allocation is not found here, it is a
115    /// global and looked up in the `tcx` for read access. Some machines may
116    /// have to mutate this map even on a read-only access to a global (because
117    /// they do pointer provenance tracking and the allocations in `tcx` have
118    /// the wrong type), so we let the machine override this type.
119    /// Either way, if the machine allows writing to a global, doing so will
120    /// create a copy of the global allocation here.
121    // FIXME: this should not be public, but interning currently needs access to it
122    pub(super) alloc_map: M::MemoryMap,
123
124    /// Map for "extra" function pointers.
125    extra_fn_ptr_map: FxIndexMap<AllocId, M::ExtraFnVal>,
126
127    /// To be able to compare pointers with null, and to check alignment for accesses
128    /// to ZSTs (where pointers may dangle), we keep track of the size even for allocations
129    /// that do not exist any more.
130    // FIXME: this should not be public, but interning currently needs access to it
131    pub(super) dead_alloc_map: FxIndexMap<AllocId, (Size, Align)>,
132
133    /// This stores whether we are currently doing reads purely for the purpose of validation.
134    /// Those reads do not trigger the machine's hooks for memory reads.
135    /// Needless to say, this must only be set with great care!
136    validation_in_progress: Cell<bool>,
137}
138
139/// A reference to some allocation that was already bounds-checked for the given region
140/// and had the on-access machine hooks run.
141#[derive(Copy, Clone)]
142pub struct AllocRef<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes = Box<[u8]>> {
143    alloc: &'a Allocation<Prov, Extra, Bytes>,
144    range: AllocRange,
145    tcx: TyCtxt<'tcx>,
146    alloc_id: AllocId,
147}
148/// A reference to some allocation that was already bounds-checked for the given region
149/// and had the on-access machine hooks run.
150pub struct AllocRefMut<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes = Box<[u8]>> {
151    alloc: &'a mut Allocation<Prov, Extra, Bytes>,
152    range: AllocRange,
153    tcx: TyCtxt<'tcx>,
154    alloc_id: AllocId,
155}
156
157impl<'tcx, M: Machine<'tcx>> Memory<'tcx, M> {
158    pub fn new() -> Self {
159        Memory {
160            alloc_map: M::MemoryMap::default(),
161            extra_fn_ptr_map: FxIndexMap::default(),
162            dead_alloc_map: FxIndexMap::default(),
163            validation_in_progress: Cell::new(false),
164        }
165    }
166
167    /// This is used by [priroda](https://github.com/oli-obk/priroda)
168    pub fn alloc_map(&self) -> &M::MemoryMap {
169        &self.alloc_map
170    }
171}
172
173impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
174    /// Call this to turn untagged "global" pointers (obtained via `tcx`) into
175    /// the machine pointer to the allocation. Must never be used
176    /// for any other pointers, nor for TLS statics.
177    ///
178    /// Using the resulting pointer represents a *direct* access to that memory
179    /// (e.g. by directly using a `static`),
180    /// as opposed to access through a pointer that was created by the program.
181    ///
182    /// This function can fail only if `ptr` points to an `extern static`.
183    #[inline]
184    pub fn global_root_pointer(
185        &self,
186        ptr: Pointer<CtfeProvenance>,
187    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
188        let alloc_id = ptr.provenance.alloc_id();
189        // We need to handle `extern static`.
190        match self.tcx.try_get_global_alloc(alloc_id) {
191            Some(GlobalAlloc::Static(def_id)) if self.tcx.is_thread_local_static(def_id) => {
192                // Thread-local statics do not have a constant address. They *must* be accessed via
193                // `ThreadLocalRef`; we can never have a pointer to them as a regular constant value.
194                bug!("global memory cannot point to thread-local static")
195            }
196            Some(GlobalAlloc::Static(def_id)) if self.tcx.is_foreign_item(def_id) => {
197                return M::extern_static_pointer(self, def_id);
198            }
199            None => {
200                assert!(
201                    self.memory.extra_fn_ptr_map.contains_key(&alloc_id),
202                    "{alloc_id:?} is neither global nor a function pointer"
203                );
204            }
205            _ => {}
206        }
207        // And we need to get the provenance.
208        M::adjust_alloc_root_pointer(self, ptr, M::GLOBAL_KIND.map(MemoryKind::Machine))
209    }
210
211    pub fn fn_ptr(&mut self, fn_val: FnVal<'tcx, M::ExtraFnVal>) -> Pointer<M::Provenance> {
212        let id = match fn_val {
213            FnVal::Instance(instance) => {
214                let salt = M::get_global_alloc_salt(self, Some(instance));
215                self.tcx.reserve_and_set_fn_alloc(instance, salt)
216            }
217            FnVal::Other(extra) => {
218                // FIXME(RalfJung): Should we have a cache here?
219                let id = self.tcx.reserve_alloc_id();
220                let old = self.memory.extra_fn_ptr_map.insert(id, extra);
221                assert!(old.is_none());
222                id
223            }
224        };
225        // Functions are global allocations, so make sure we get the right root pointer.
226        // We know this is not an `extern static` so this cannot fail.
227        self.global_root_pointer(Pointer::from(id)).unwrap()
228    }
229
230    pub fn allocate_ptr(
231        &mut self,
232        size: Size,
233        align: Align,
234        kind: MemoryKind<M::MemoryKind>,
235        init: AllocInit,
236    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
237        let params = self.machine.get_default_alloc_params();
238        let alloc = if M::PANIC_ON_ALLOC_FAIL {
239            Allocation::new(size, align, init, params)
240        } else {
241            Allocation::try_new(size, align, init, params)?
242        };
243        self.insert_allocation(alloc, kind)
244    }
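// Editorial sketch (not in the original source): a hedged example of requesting fresh
// interpreter memory via `allocate_ptr`. The `AllocInit` variant named here is an assumption.
//
//     let ptr = ecx.allocate_ptr(
//         Size::from_bytes(16),
//         Align::from_bytes(8).unwrap(),
//         MemoryKind::Stack,
//         AllocInit::Zero, // assumed variant: request zero-initialized contents
//     )?;
//     // `ptr` is a `Pointer<M::Provenance>` at offset 0 of the new allocation.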
245
246    pub fn allocate_bytes_ptr(
247        &mut self,
248        bytes: &[u8],
249        align: Align,
250        kind: MemoryKind<M::MemoryKind>,
251        mutability: Mutability,
252    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
253        let params = self.machine.get_default_alloc_params();
254        let alloc = Allocation::from_bytes(bytes, align, mutability, params);
255        self.insert_allocation(alloc, kind)
256    }
257
258    pub fn insert_allocation(
259        &mut self,
260        alloc: Allocation<M::Provenance, (), M::Bytes>,
261        kind: MemoryKind<M::MemoryKind>,
262    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
263        assert!(alloc.size() <= self.max_size_of_val());
264        let id = self.tcx.reserve_alloc_id();
265        debug_assert_ne!(
266            Some(kind),
267            M::GLOBAL_KIND.map(MemoryKind::Machine),
268            "dynamically allocating global memory"
269        );
270        // This cannot be merged with the `adjust_global_allocation` code path
271        // since here we have an allocation that already uses `M::Bytes`.
272        let extra = M::init_local_allocation(self, id, kind, alloc.size(), alloc.align)?;
273        let alloc = alloc.with_extra(extra);
274        self.memory.alloc_map.insert(id, (kind, alloc));
275        M::adjust_alloc_root_pointer(self, Pointer::from(id), Some(kind))
276    }
277
278    /// If this grows the allocation, `init_growth` determines
279    /// whether the additional space will be initialized.
280    pub fn reallocate_ptr(
281        &mut self,
282        ptr: Pointer<Option<M::Provenance>>,
283        old_size_and_align: Option<(Size, Align)>,
284        new_size: Size,
285        new_align: Align,
286        kind: MemoryKind<M::MemoryKind>,
287        init_growth: AllocInit,
288    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
289        let (alloc_id, offset, _prov) = self.ptr_get_alloc_id(ptr, 0)?;
290        if offset.bytes() != 0 {
291            throw_ub_custom!(
292                fluent::const_eval_realloc_or_alloc_with_offset,
293                ptr = format!("{ptr:?}"),
294                kind = "realloc"
295            );
296        }
297
298        // For simplicity's sake, we implement reallocate as "alloc, copy, dealloc".
299        // This happens so rarely that the perf advantage is outweighed by the maintenance cost.
300        // If requested, we zero-init the entire allocation, to ensure that a growing
301        // allocation has its new bytes properly set. For the part that is copied,
302        // `mem_copy` below will de-initialize things as necessary.
303        let new_ptr = self.allocate_ptr(new_size, new_align, kind, init_growth)?;
304        let old_size = match old_size_and_align {
305            Some((size, _align)) => size,
306            None => self.get_alloc_raw(alloc_id)?.size(),
307        };
308        // This will also call the access hooks.
309        self.mem_copy(ptr, new_ptr.into(), old_size.min(new_size), /*nonoverlapping*/ true)?;
310        self.deallocate_ptr(ptr, old_size_and_align, kind)?;
311
312        interp_ok(new_ptr)
313    }
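// Editorial sketch (not in the original source): a hedged example of growing an existing
// allocation with `reallocate_ptr`. The `AllocInit` variant named here is an assumption.
//
//     let bigger = ecx.reallocate_ptr(
//         old_ptr.into(),
//         None, // let the interpreter look up the old size and alignment
//         Size::from_bytes(64),
//         Align::from_bytes(8).unwrap(),
//         MemoryKind::Stack,
//         AllocInit::Uninit, // assumed variant: newly grown bytes stay uninitialized
//     )?;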
314
315    /// Mark the `const_allocate`d allocation `ptr` points to as immutable so we can intern it.
316    pub fn make_const_heap_ptr_global(
317        &mut self,
318        ptr: Pointer<Option<CtfeProvenance>>,
319    ) -> InterpResult<'tcx>
320    where
321        M: Machine<'tcx, MemoryKind = crate::const_eval::MemoryKind, Provenance = CtfeProvenance>,
322    {
323        let (alloc_id, offset, _) = self.ptr_get_alloc_id(ptr, 0)?;
324        if offset.bytes() != 0 {
325            return Err(ConstEvalErrKind::ConstMakeGlobalWithOffset(ptr)).into();
326        }
327
328        if matches!(self.tcx.try_get_global_alloc(alloc_id), Some(_)) {
329            // This points to something outside the current interpreter.
330            return Err(ConstEvalErrKind::ConstMakeGlobalPtrIsNonHeap(ptr)).into();
331        }
332
333        // If we can't find it in `alloc_map`, it must be dangling (because we don't use
334        // `extra_fn_ptr_map` in const-eval).
335        let (kind, alloc) = self
336            .memory
337            .alloc_map
338            .get_mut_or(alloc_id, || Err(ConstEvalErrKind::ConstMakeGlobalWithDanglingPtr(ptr)))?;
339
340        // Ensure this is actually a *heap* allocation, and record it as made-global.
341        match kind {
342            MemoryKind::Stack | MemoryKind::CallerLocation => {
343                return Err(ConstEvalErrKind::ConstMakeGlobalPtrIsNonHeap(ptr)).into();
344            }
345            MemoryKind::Machine(crate::const_eval::MemoryKind::Heap { was_made_global }) => {
346                if *was_made_global {
347                    return Err(ConstEvalErrKind::ConstMakeGlobalPtrAlreadyMadeGlobal(alloc_id))
348                        .into();
349                }
350                *was_made_global = true;
351            }
352        }
353
354        // Prevent further mutation, this is now an immutable global.
355        alloc.mutability = Mutability::Not;
356
357        interp_ok(())
358    }
359
360    #[instrument(skip(self), level = "debug")]
361    pub fn deallocate_ptr(
362        &mut self,
363        ptr: Pointer<Option<M::Provenance>>,
364        old_size_and_align: Option<(Size, Align)>,
365        kind: MemoryKind<M::MemoryKind>,
366    ) -> InterpResult<'tcx> {
367        let (alloc_id, offset, prov) = self.ptr_get_alloc_id(ptr, 0)?;
368        trace!("deallocating: {alloc_id:?}");
369
370        if offset.bytes() != 0 {
371            throw_ub_custom!(
372                fluent::const_eval_realloc_or_alloc_with_offset,
373                ptr = format!("{ptr:?}"),
374                kind = "dealloc",
375            );
376        }
377
378        let Some((alloc_kind, mut alloc)) = self.memory.alloc_map.remove(&alloc_id) else {
379            // Deallocating global memory -- always an error
380            return Err(match self.tcx.try_get_global_alloc(alloc_id) {
381                Some(GlobalAlloc::Function { .. }) => {
382                    err_ub_custom!(
383                        fluent::const_eval_invalid_dealloc,
384                        alloc_id = alloc_id,
385                        kind = "fn",
386                    )
387                }
388                Some(GlobalAlloc::VTable(..)) => {
389                    err_ub_custom!(
390                        fluent::const_eval_invalid_dealloc,
391                        alloc_id = alloc_id,
392                        kind = "vtable",
393                    )
394                }
395                Some(GlobalAlloc::TypeId { .. }) => {
396                    err_ub_custom!(
397                        fluent::const_eval_invalid_dealloc,
398                        alloc_id = alloc_id,
399                        kind = "typeid",
400                    )
401                }
402                Some(GlobalAlloc::Static(..) | GlobalAlloc::Memory(..)) => {
403                    err_ub_custom!(
404                        fluent::const_eval_invalid_dealloc,
405                        alloc_id = alloc_id,
406                        kind = "static_mem"
407                    )
408                }
409                None => err_ub!(PointerUseAfterFree(alloc_id, CheckInAllocMsg::MemoryAccess)),
410            })
411            .into();
412        };
413
414        if alloc.mutability.is_not() {
415            throw_ub_custom!(fluent::const_eval_dealloc_immutable, alloc = alloc_id,);
416        }
417        if alloc_kind != kind {
418            throw_ub_custom!(
419                fluent::const_eval_dealloc_kind_mismatch,
420                alloc = alloc_id,
421                alloc_kind = format!("{alloc_kind}"),
422                kind = format!("{kind}"),
423            );
424        }
425        if let Some((size, align)) = old_size_and_align {
426            if size != alloc.size() || align != alloc.align {
427                throw_ub_custom!(
428                    fluent::const_eval_dealloc_incorrect_layout,
429                    alloc = alloc_id,
430                    size = alloc.size().bytes(),
431                    align = alloc.align.bytes(),
432                    size_found = size.bytes(),
433                    align_found = align.bytes(),
434                )
435            }
436        }
437
438        // Let the machine take some extra action
439        let size = alloc.size();
440        M::before_memory_deallocation(
441            self.tcx,
442            &mut self.machine,
443            &mut alloc.extra,
444            ptr,
445            (alloc_id, prov),
446            size,
447            alloc.align,
448            kind,
449        )?;
450
451        // Don't forget to remember size and align of this now-dead allocation
452        let old = self.memory.dead_alloc_map.insert(alloc_id, (size, alloc.align));
453        if old.is_some() {
454            bug!("Nothing can be deallocated twice");
455        }
456
457        interp_ok(())
458    }
459
460    /// Internal helper function to determine the allocation and offset of a pointer (if any).
461    #[inline(always)]
462    fn get_ptr_access(
463        &self,
464        ptr: Pointer<Option<M::Provenance>>,
465        size: Size,
466    ) -> InterpResult<'tcx, Option<(AllocId, Size, M::ProvenanceExtra)>> {
467        let size = i64::try_from(size.bytes()).unwrap(); // it would be an error to even ask for more than isize::MAX bytes
468        Self::check_and_deref_ptr(
469            self,
470            ptr,
471            size,
472            CheckInAllocMsg::MemoryAccess,
473            |this, alloc_id, offset, prov| {
474                let (size, align) =
475                    this.get_live_alloc_size_and_align(alloc_id, CheckInAllocMsg::MemoryAccess)?;
476                interp_ok((size, align, (alloc_id, offset, prov)))
477            },
478        )
479    }
480
481    /// Check if the given pointer points to live memory of the given `size`.
482    /// The caller can control the error message for the out-of-bounds case.
483    #[inline(always)]
484    pub fn check_ptr_access(
485        &self,
486        ptr: Pointer<Option<M::Provenance>>,
487        size: Size,
488        msg: CheckInAllocMsg,
489    ) -> InterpResult<'tcx> {
490        let size = i64::try_from(size.bytes()).unwrap(); // it would be an error to even ask for more than isize::MAX bytes
491        Self::check_and_deref_ptr(self, ptr, size, msg, |this, alloc_id, _, _| {
492            let (size, align) = this.get_live_alloc_size_and_align(alloc_id, msg)?;
493            interp_ok((size, align, ()))
494        })?;
495        interp_ok(())
496    }
497
498    /// Check whether the given pointer points to live memory for a signed amount of bytes.
499    /// A negative amount means that the given range of memory to the left of the pointer
500    /// needs to be dereferenceable.
501    pub fn check_ptr_access_signed(
502        &self,
503        ptr: Pointer<Option<M::Provenance>>,
504        size: i64,
505        msg: CheckInAllocMsg,
506    ) -> InterpResult<'tcx> {
507        Self::check_and_deref_ptr(self, ptr, size, msg, |this, alloc_id, _, _| {
508            let (size, align) = this.get_live_alloc_size_and_align(alloc_id, msg)?;
509            interp_ok((size, align, ()))
510        })?;
511        interp_ok(())
512    }
513
514    /// Low-level helper function to check if a ptr is in-bounds and potentially return a reference
515    /// to the allocation it points to. Supports both shared and mutable references, as the actual
516    /// checking is offloaded to a helper closure. Supports signed sizes for checks "to the left" of
517    /// a pointer.
518    ///
519    /// `alloc_size` will only get called for non-zero-sized accesses.
520    ///
521    /// Returns `None` if and only if the size is 0.
522    fn check_and_deref_ptr<T, R: Borrow<Self>>(
523        this: R,
524        ptr: Pointer<Option<M::Provenance>>,
525        size: i64,
526        msg: CheckInAllocMsg,
527        alloc_size: impl FnOnce(
528            R,
529            AllocId,
530            Size,
531            M::ProvenanceExtra,
532        ) -> InterpResult<'tcx, (Size, Align, T)>,
533    ) -> InterpResult<'tcx, Option<T>> {
534        // Everything is okay with size 0.
535        if size == 0 {
536            return interp_ok(None);
537        }
538
539        interp_ok(match this.borrow().ptr_try_get_alloc_id(ptr, size) {
540            Err(addr) => {
541                // We couldn't get a proper allocation.
542                throw_ub!(DanglingIntPointer { addr, inbounds_size: size, msg });
543            }
544            Ok((alloc_id, offset, prov)) => {
545                let tcx = this.borrow().tcx;
546                let (alloc_size, _alloc_align, ret_val) = alloc_size(this, alloc_id, offset, prov)?;
547                let offset = offset.bytes();
548                // Compute absolute begin and end of the range.
549                let (begin, end) = if size >= 0 {
550                    (Some(offset), offset.checked_add(size as u64))
551                } else {
552                    (offset.checked_sub(size.unsigned_abs()), Some(offset))
553                };
554                // Ensure both are within bounds.
555                let in_bounds = begin.is_some() && end.is_some_and(|e| e <= alloc_size.bytes());
556                if !in_bounds {
557                    throw_ub!(PointerOutOfBounds {
558                        alloc_id,
559                        alloc_size,
560                        ptr_offset: tcx.sign_extend_to_target_isize(offset),
561                        inbounds_size: size,
562                        msg,
563                    })
564                }
565
566                Some(ret_val)
567            }
568        })
569    }
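// Editorial worked example (not in the original source) for the signed bounds check above.
// Suppose `offset = 8` and `alloc_size = 16`:
//   * `size =  4` => begin = Some(8), end = Some(12); 12 <= 16, so the access is in bounds.
//   * `size = -4` => begin = Some(4), end = Some(8); the range "to the left" is in bounds.
//   * `size = -12` => `8 - 12` underflows, so `begin` is `None` and we report out-of-bounds.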
570
571    pub(super) fn check_misalign(
572        &self,
573        misaligned: Option<Misalignment>,
574        msg: CheckAlignMsg,
575    ) -> InterpResult<'tcx> {
576        if let Some(misaligned) = misaligned {
577            throw_ub!(AlignmentCheckFailed(misaligned, msg))
578        }
579        interp_ok(())
580    }
581
582    pub(super) fn is_ptr_misaligned(
583        &self,
584        ptr: Pointer<Option<M::Provenance>>,
585        align: Align,
586    ) -> Option<Misalignment> {
587        if !M::enforce_alignment(self) || align.bytes() == 1 {
588            return None;
589        }
590
591        #[inline]
592        fn is_offset_misaligned(offset: u64, align: Align) -> Option<Misalignment> {
593            if offset.is_multiple_of(align.bytes()) {
594                None
595            } else {
596                // The largest power of two by which `offset` is divisible.
597                let offset_pow2 = 1 << offset.trailing_zeros();
598                Some(Misalignment { has: Align::from_bytes(offset_pow2).unwrap(), required: align })
599            }
600        }
601
602        match self.ptr_try_get_alloc_id(ptr, 0) {
603            Err(addr) => is_offset_misaligned(addr, align),
604            Ok((alloc_id, offset, _prov)) => {
605                let alloc_info = self.get_alloc_info(alloc_id);
606                if let Some(misalign) = M::alignment_check(
607                    self,
608                    alloc_id,
609                    alloc_info.align,
610                    alloc_info.kind,
611                    offset,
612                    align,
613                ) {
614                    Some(misalign)
615                } else if M::Provenance::OFFSET_IS_ADDR {
616                    is_offset_misaligned(ptr.addr().bytes(), align)
617                } else {
618                    // Check allocation alignment and offset alignment.
619                    if alloc_info.align.bytes() < align.bytes() {
620                        Some(Misalignment { has: alloc_info.align, required: align })
621                    } else {
622                        is_offset_misaligned(offset.bytes(), align)
623                    }
624                }
625            }
626        }
627    }
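// Editorial worked example (not in the original source) for `is_offset_misaligned`: with
// `offset = 12` and `align = 8`, 12 is not a multiple of 8, and `1 << 12u64.trailing_zeros()`
// is 4, so we report `Misalignment { has: 4-byte alignment, required: 8-byte alignment }`.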
628
629    /// Checks a pointer for misalignment.
630    ///
631    /// The error assumes this is checking the pointer used directly for an access.
632    pub fn check_ptr_align(
633        &self,
634        ptr: Pointer<Option<M::Provenance>>,
635        align: Align,
636    ) -> InterpResult<'tcx> {
637        self.check_misalign(self.is_ptr_misaligned(ptr, align), CheckAlignMsg::AccessedPtr)
638    }
639}
640
641impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
642    /// This function is used by Miri's provenance GC to remove unreachable entries from the dead_alloc_map.
643    pub fn remove_unreachable_allocs(&mut self, reachable_allocs: &FxHashSet<AllocId>) {
644        // Unlike the other GC helpers, where we check whether an `AllocId` is found in the
645        // interpreter or is live, all the IDs in this map belong to dead allocations, so we
646        // don't need to check for liveness.
647        #[allow(rustc::potential_query_instability)] // Only used from Miri, not queries.
648        self.memory.dead_alloc_map.retain(|id, _| reachable_allocs.contains(id));
649    }
650}
651
652/// Allocation accessors
653impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
654    /// Helper function to obtain a global (tcx) allocation.
655    /// This attempts to return a reference to an existing allocation if
656    /// one can be found in `tcx`. That, however, is only possible if `tcx` and
657    /// this machine use the same pointer provenance, so it is indirected through
658    /// `M::adjust_global_allocation`.
659    fn get_global_alloc(
660        &self,
661        id: AllocId,
662        is_write: bool,
663    ) -> InterpResult<'tcx, Cow<'tcx, Allocation<M::Provenance, M::AllocExtra, M::Bytes>>> {
664        let (alloc, def_id) = match self.tcx.try_get_global_alloc(id) {
665            Some(GlobalAlloc::Memory(mem)) => {
666                // Memory of a constant or promoted or anonymous memory referenced by a static.
667                (mem, None)
668            }
669            Some(GlobalAlloc::Function { .. }) => throw_ub!(DerefFunctionPointer(id)),
670            Some(GlobalAlloc::VTable(..)) => throw_ub!(DerefVTablePointer(id)),
671            Some(GlobalAlloc::TypeId { .. }) => throw_ub!(DerefTypeIdPointer(id)),
672            None => throw_ub!(PointerUseAfterFree(id, CheckInAllocMsg::MemoryAccess)),
673            Some(GlobalAlloc::Static(def_id)) => {
674                assert!(self.tcx.is_static(def_id));
675                // Thread-local statics do not have a constant address. They *must* be accessed via
676                // `ThreadLocalRef`; we can never have a pointer to them as a regular constant value.
677                assert!(!self.tcx.is_thread_local_static(def_id));
678                // Notice that every static has two `AllocId` that will resolve to the same
679                // thing here: one maps to `GlobalAlloc::Static` (the "lazy" ID), and the
680                // other one maps to `GlobalAlloc::Memory`; the latter is returned by
681                // `eval_static_initializer` and is the "resolved" ID.
682                // The resolved ID is never used by the interpreted program, it is hidden.
683                // This is relied upon for soundness of const-patterns; a pointer to the resolved
684                // ID would "sidestep" the checks that make sure consts do not point to statics!
685                // The `GlobalAlloc::Memory` branch here is still reachable though; when a static
686                // contains a reference to memory that was created during its evaluation (i.e., not
687                // to another static), those inner references only exist in "resolved" form.
688                if self.tcx.is_foreign_item(def_id) {
689                    // This is unreachable in Miri, but can happen in CTFE where we actually *do* support
690                    // referencing arbitrary (declared) extern statics.
691                    throw_unsup!(ExternStatic(def_id));
692                }
693
694                // We don't give a span -- statics don't need that, they cannot be generic or associated.
695                let val = self.ctfe_query(|tcx| tcx.eval_static_initializer(def_id))?;
696                (val, Some(def_id))
697            }
698        };
699        M::before_access_global(self.tcx, &self.machine, id, alloc, def_id, is_write)?;
700        // We got tcx memory. Let the machine initialize its "extra" stuff.
701        M::adjust_global_allocation(
702            self,
703            id, // always use the ID we got as input, not the "hidden" one.
704            alloc.inner(),
705        )
706    }
707
708    /// Gives raw access to the `Allocation`, without bounds or alignment checks.
709    /// The caller is responsible for calling the access hooks!
710    ///
711    /// You almost certainly want to use `get_ptr_alloc`/`get_ptr_alloc_mut` instead.
712    pub fn get_alloc_raw(
713        &self,
714        id: AllocId,
715    ) -> InterpResult<'tcx, &Allocation<M::Provenance, M::AllocExtra, M::Bytes>> {
716        // The error type of the inner closure here is somewhat funny. We have two
717        // ways of "erroring": An actual error, or because we got a reference from
718        // `get_global_alloc` that we can actually use directly without inserting anything anywhere.
719        // So the error type is `InterpResult<'tcx, &Allocation<M::Provenance>>`.
720        let a = self.memory.alloc_map.get_or(id, || {
721            // We have to funnel the `InterpErrorInfo` through a `Result` to match the `get_or` API,
722            // so we use `report_err` for that.
723            let alloc = self.get_global_alloc(id, /*is_write*/ false).report_err().map_err(Err)?;
724            match alloc {
725                Cow::Borrowed(alloc) => {
726                    // We got a ref, cheaply return that as an "error" so that the
727                    // map does not get mutated.
728                    Err(Ok(alloc))
729                }
730                Cow::Owned(alloc) => {
731                    // Need to put it into the map and return a ref to that
732                    let kind = M::GLOBAL_KIND.expect(
733                        "I got a global allocation that I have to copy but the machine does \
734                            not expect that to happen",
735                    );
736                    Ok((MemoryKind::Machine(kind), alloc))
737                }
738            }
739        });
740        // Now unpack that funny error type
741        match a {
742            Ok(a) => interp_ok(&a.1),
743            Err(a) => a.into(),
744        }
745    }
746
747    /// Gives raw, immutable access to the `Allocation` address, without bounds or alignment checks.
748    /// The caller is responsible for calling the access hooks!
749    pub fn get_alloc_bytes_unchecked_raw(&self, id: AllocId) -> InterpResult<'tcx, *const u8> {
750        let alloc = self.get_alloc_raw(id)?;
751        interp_ok(alloc.get_bytes_unchecked_raw())
752    }
753
754    /// Bounds-checked *but not align-checked* allocation access.
755    pub fn get_ptr_alloc<'a>(
756        &'a self,
757        ptr: Pointer<Option<M::Provenance>>,
758        size: Size,
759    ) -> InterpResult<'tcx, Option<AllocRef<'a, 'tcx, M::Provenance, M::AllocExtra, M::Bytes>>>
760    {
761        let size_i64 = i64::try_from(size.bytes()).unwrap(); // it would be an error to even ask for more than isize::MAX bytes
762        let ptr_and_alloc = Self::check_and_deref_ptr(
763            self,
764            ptr,
765            size_i64,
766            CheckInAllocMsg::MemoryAccess,
767            |this, alloc_id, offset, prov| {
768                let alloc = this.get_alloc_raw(alloc_id)?;
769                interp_ok((alloc.size(), alloc.align, (alloc_id, offset, prov, alloc)))
770            },
771        )?;
772        // We want to call the hook on *all* accesses that involve an AllocId, including zero-sized
773        // accesses. That means we cannot rely on the closure above or the `Some` branch below. We
774        // do this after `check_and_deref_ptr` to ensure some basic sanity has already been checked.
775        if !self.memory.validation_in_progress.get() {
776            if let Ok((alloc_id, ..)) = self.ptr_try_get_alloc_id(ptr, size_i64) {
777                M::before_alloc_access(self.tcx, &self.machine, alloc_id)?;
778            }
779        }
780
781        if let Some((alloc_id, offset, prov, alloc)) = ptr_and_alloc {
782            let range = alloc_range(offset, size);
783            if !self.memory.validation_in_progress.get() {
784                M::before_memory_read(
785                    self.tcx,
786                    &self.machine,
787                    &alloc.extra,
788                    ptr,
789                    (alloc_id, prov),
790                    range,
791                )?;
792            }
793            interp_ok(Some(AllocRef { alloc, range, tcx: *self.tcx, alloc_id }))
794        } else {
795            interp_ok(None)
796        }
797    }
798
799    /// Return the `extra` field of the given allocation.
800    pub fn get_alloc_extra<'a>(&'a self, id: AllocId) -> InterpResult<'tcx, &'a M::AllocExtra> {
801        interp_ok(&self.get_alloc_raw(id)?.extra)
802    }
803
804    /// Return the `mutability` field of the given allocation.
805    pub fn get_alloc_mutability<'a>(&'a self, id: AllocId) -> InterpResult<'tcx, Mutability> {
806        interp_ok(self.get_alloc_raw(id)?.mutability)
807    }
808
809    /// Gives raw mutable access to the `Allocation`, without bounds or alignment checks.
810    /// The caller is responsible for calling the access hooks!
811    ///
812    /// Also returns a ptr to `self.extra` so that the caller can use it in parallel with the
813    /// allocation.
814    ///
815    /// You almost certainly want to use `get_ptr_alloc`/`get_ptr_alloc_mut` instead.
816    pub fn get_alloc_raw_mut(
817        &mut self,
818        id: AllocId,
819    ) -> InterpResult<'tcx, (&mut Allocation<M::Provenance, M::AllocExtra, M::Bytes>, &mut M)> {
820        // We have "NLL problem case #3" here, which cannot be worked around without loss of
821        // efficiency even for the common case where the key is in the map.
822        // <https://rust-lang.github.io/rfcs/2094-nll.html#problem-case-3-conditional-control-flow-across-functions>
823        // (Cannot use `get_mut_or` since `get_global_alloc` needs `&self`, and that boils down to
824        // Miri's `adjust_alloc_root_pointer` needing to look up the size of the allocation.
825        // It could be avoided with a totally separate codepath in Miri for handling the absolute address
826        // of global allocations, but that's not worth it.)
827        if self.memory.alloc_map.get_mut(id).is_none() {
828            // Slow path.
829            // Allocation not found locally, so go look it up as a global.
830            let alloc = self.get_global_alloc(id, /*is_write*/ true)?;
831            let kind = M::GLOBAL_KIND.expect(
832                "I got a global allocation that I have to copy but the machine does \
833                    not expect that to happen",
834            );
835            self.memory.alloc_map.insert(id, (MemoryKind::Machine(kind), alloc.into_owned()));
836        }
837
838        let (_kind, alloc) = self.memory.alloc_map.get_mut(id).unwrap();
839        if alloc.mutability.is_not() {
840            throw_ub!(WriteToReadOnly(id))
841        }
842        interp_ok((alloc, &mut self.machine))
843    }
844
845    /// Gives raw, mutable access to the `Allocation` address, without bounds or alignment checks.
846    /// The caller is responsible for calling the access hooks!
847    pub fn get_alloc_bytes_unchecked_raw_mut(
848        &mut self,
849        id: AllocId,
850    ) -> InterpResult<'tcx, *mut u8> {
851        let alloc = self.get_alloc_raw_mut(id)?.0;
852        interp_ok(alloc.get_bytes_unchecked_raw_mut())
853    }
854
855    /// Bounds-checked *but not align-checked* allocation access.
856    pub fn get_ptr_alloc_mut<'a>(
857        &'a mut self,
858        ptr: Pointer<Option<M::Provenance>>,
859        size: Size,
860    ) -> InterpResult<'tcx, Option<AllocRefMut<'a, 'tcx, M::Provenance, M::AllocExtra, M::Bytes>>>
861    {
862        let tcx = self.tcx;
863        let validation_in_progress = self.memory.validation_in_progress.get();
864
865        let size_i64 = i64::try_from(size.bytes()).unwrap(); // it would be an error to even ask for more than isize::MAX bytes
866        let ptr_and_alloc = Self::check_and_deref_ptr(
867            self,
868            ptr,
869            size_i64,
870            CheckInAllocMsg::MemoryAccess,
871            |this, alloc_id, offset, prov| {
872                let (alloc, machine) = this.get_alloc_raw_mut(alloc_id)?;
873                interp_ok((alloc.size(), alloc.align, (alloc_id, offset, prov, alloc, machine)))
874            },
875        )?;
876
877        if let Some((alloc_id, offset, prov, alloc, machine)) = ptr_and_alloc {
878            let range = alloc_range(offset, size);
879            if !validation_in_progress {
880                // For writes, it's okay to call these hooks only when there is actually a
881                // non-zero number of bytes to be written: a zero-sized write doesn't manifest anything.
882                M::before_alloc_access(tcx, machine, alloc_id)?;
883                M::before_memory_write(
884                    tcx,
885                    machine,
886                    &mut alloc.extra,
887                    ptr,
888                    (alloc_id, prov),
889                    range,
890                )?;
891            }
892            interp_ok(Some(AllocRefMut { alloc, range, tcx: *tcx, alloc_id }))
893        } else {
894            interp_ok(None)
895        }
896    }
897
898    /// Return the `extra` field of the given allocation.
899    pub fn get_alloc_extra_mut<'a>(
900        &'a mut self,
901        id: AllocId,
902    ) -> InterpResult<'tcx, (&'a mut M::AllocExtra, &'a mut M)> {
903        let (alloc, machine) = self.get_alloc_raw_mut(id)?;
904        interp_ok((&mut alloc.extra, machine))
905    }
906
907    /// Check whether an allocation is live. This is faster than calling
908    /// [`InterpCx::get_alloc_info`] if all you need to check is whether the kind is
909    /// [`AllocKind::Dead`] because it doesn't have to look up the type and layout of statics.
910    pub fn is_alloc_live(&self, id: AllocId) -> bool {
911        self.memory.alloc_map.contains_key_ref(&id)
912            || self.memory.extra_fn_ptr_map.contains_key(&id)
913            // We check `tcx` last as that has to acquire a lock in `many-seeds` mode.
914            // This also matches the order in `get_alloc_info`.
915            || self.tcx.try_get_global_alloc(id).is_some()
916    }
917
918    /// Obtain the size and alignment of an allocation, even if that allocation has
919    /// been deallocated.
920    pub fn get_alloc_info(&self, id: AllocId) -> AllocInfo {
921        // # Regular allocations
922        // Don't use `self.get_raw` here as that will
923        // a) cause cycles in case `id` refers to a static
924        // b) duplicate a global's allocation in miri
925        if let Some((_, alloc)) = self.memory.alloc_map.get(id) {
926            return AllocInfo::new(
927                alloc.size(),
928                alloc.align,
929                AllocKind::LiveData,
930                alloc.mutability,
931            );
932        }
933
934        // # Function pointers
935        // (both global from `alloc_map` and local from `extra_fn_ptr_map`)
936        if let Some(fn_val) = self.get_fn_alloc(id) {
937            let align = match fn_val {
938                FnVal::Instance(instance) => {
939                    self.tcx.codegen_instance_attrs(instance.def).alignment.unwrap_or(Align::ONE)
940                }
941                // Machine-specific extra functions currently do not support alignment restrictions.
942                FnVal::Other(_) => Align::ONE,
943            };
944
945            return AllocInfo::new(Size::ZERO, align, AllocKind::Function, Mutability::Not);
946        }
947
948        // # Global allocations
949        if let Some(global_alloc) = self.tcx.try_get_global_alloc(id) {
950            let (size, align) = global_alloc.size_and_align(*self.tcx, self.typing_env);
951            let mutbl = global_alloc.mutability(*self.tcx, self.typing_env);
952            let kind = match global_alloc {
953                GlobalAlloc::TypeId { .. }
954                | GlobalAlloc::Static { .. }
955                | GlobalAlloc::Memory { .. } => AllocKind::LiveData,
956                GlobalAlloc::Function { .. } => bug!("We already checked function pointers above"),
957                GlobalAlloc::VTable { .. } => AllocKind::VTable,
958            };
959            return AllocInfo::new(size, align, kind, mutbl);
960        }
961
962        // # Dead pointers
963        let (size, align) = *self
964            .memory
965            .dead_alloc_map
966            .get(&id)
967            .expect("deallocated pointers should all be recorded in `dead_alloc_map`");
968        AllocInfo::new(size, align, AllocKind::Dead, Mutability::Not)
969    }
970
971    /// Obtain the size and alignment of a *live* allocation.
972    fn get_live_alloc_size_and_align(
973        &self,
974        id: AllocId,
975        msg: CheckInAllocMsg,
976    ) -> InterpResult<'tcx, (Size, Align)> {
977        let info = self.get_alloc_info(id);
978        if matches!(info.kind, AllocKind::Dead) {
979            throw_ub!(PointerUseAfterFree(id, msg))
980        }
981        interp_ok((info.size, info.align))
982    }
983
984    fn get_fn_alloc(&self, id: AllocId) -> Option<FnVal<'tcx, M::ExtraFnVal>> {
985        if let Some(extra) = self.memory.extra_fn_ptr_map.get(&id) {
986            Some(FnVal::Other(*extra))
987        } else {
988            match self.tcx.try_get_global_alloc(id) {
989                Some(GlobalAlloc::Function { instance, .. }) => Some(FnVal::Instance(instance)),
990                _ => None,
991            }
992        }
993    }
994
995    /// Takes a pointer that is the first chunk of a `TypeId` and returns the type that its
996    /// provenance refers to, as well as the segment of the hash that this pointer covers.
997    pub fn get_ptr_type_id(
998        &self,
999        ptr: Pointer<Option<M::Provenance>>,
1000    ) -> InterpResult<'tcx, (Ty<'tcx>, u64)> {
1001        let (alloc_id, offset, _meta) = self.ptr_get_alloc_id(ptr, 0)?;
1002        let GlobalAlloc::TypeId { ty } = self.tcx.global_alloc(alloc_id) else {
1003            throw_ub_format!("invalid `TypeId` value: not all bytes carry type id metadata")
1004        };
1005        interp_ok((ty, offset.bytes()))
1006    }
1007
1008    pub fn get_ptr_fn(
1009        &self,
1010        ptr: Pointer<Option<M::Provenance>>,
1011    ) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> {
1012        trace!("get_ptr_fn({:?})", ptr);
1013        let (alloc_id, offset, _prov) = self.ptr_get_alloc_id(ptr, 0)?;
1014        if offset.bytes() != 0 {
1015            throw_ub!(InvalidFunctionPointer(Pointer::new(alloc_id, offset)))
1016        }
1017        self.get_fn_alloc(alloc_id)
1018            .ok_or_else(|| err_ub!(InvalidFunctionPointer(Pointer::new(alloc_id, offset))))
1019            .into()
1020    }
1021
1022    /// Get the dynamic type of the given vtable pointer.
1023    /// If `expected_trait` is `Some`, it must be a vtable for the given trait.
1024    pub fn get_ptr_vtable_ty(
1025        &self,
1026        ptr: Pointer<Option<M::Provenance>>,
1027        expected_trait: Option<&'tcx ty::List<ty::PolyExistentialPredicate<'tcx>>>,
1028    ) -> InterpResult<'tcx, Ty<'tcx>> {
1029        trace!("get_ptr_vtable({:?})", ptr);
1030        let (alloc_id, offset, _tag) = self.ptr_get_alloc_id(ptr, 0)?;
1031        if offset.bytes() != 0 {
1032            throw_ub!(InvalidVTablePointer(Pointer::new(alloc_id, offset)))
1033        }
1034        let Some(GlobalAlloc::VTable(ty, vtable_dyn_type)) =
1035            self.tcx.try_get_global_alloc(alloc_id)
1036        else {
1037            throw_ub!(InvalidVTablePointer(Pointer::new(alloc_id, offset)))
1038        };
1039        if let Some(expected_dyn_type) = expected_trait {
1040            self.check_vtable_for_type(vtable_dyn_type, expected_dyn_type)?;
1041        }
1042        interp_ok(ty)
1043    }
1044
1045    pub fn alloc_mark_immutable(&mut self, id: AllocId) -> InterpResult<'tcx> {
1046        self.get_alloc_raw_mut(id)?.0.mutability = Mutability::Not;
1047        interp_ok(())
1048    }
1049
1050    /// Visit all allocations reachable from the given start set, by recursively traversing the
1051    /// provenance information of those allocations.
1052    pub fn visit_reachable_allocs(
1053        &mut self,
1054        start: Vec<AllocId>,
1055        mut visit: impl FnMut(&mut Self, AllocId, &AllocInfo) -> InterpResult<'tcx>,
1056    ) -> InterpResult<'tcx> {
1057        let mut done = FxHashSet::default();
1058        let mut todo = start;
1059        while let Some(id) = todo.pop() {
1060            if !done.insert(id) {
1061                // We already saw this allocation before, don't process it again.
1062                continue;
1063            }
1064            let info = self.get_alloc_info(id);
1065
1066            // Recurse, if there is data here.
1067            // Do this *before* invoking the callback, as the callback might mutate the
1068            // allocation and e.g. replace all provenance by wildcards!
1069            if matches!(info.kind, AllocKind::LiveData) {
1070                let alloc = self.get_alloc_raw(id)?;
1071                for prov in alloc.provenance().provenances() {
1072                    if let Some(id) = prov.get_alloc_id() {
1073                        todo.push(id);
1074                    }
1075                }
1076            }
1077
1078            // Call the callback.
1079            visit(self, id, &info)?;
1080        }
1081        interp_ok(())
1082    }
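// Editorial sketch (not in the original source): a hedged example of driving the traversal
// above, e.g. to mark every live allocation reachable from a root as immutable. `root_id`
// is a placeholder.
//
//     ecx.visit_reachable_allocs(vec![root_id], |ecx, id, info| {
//         if matches!(info.kind, AllocKind::LiveData) {
//             ecx.alloc_mark_immutable(id)?;
//         }
//         interp_ok(())
//     })?;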
1083
1084    /// Create a lazy debug printer that prints the given allocation and all allocations it points
1085    /// to, recursively.
1086    #[must_use]
1087    pub fn dump_alloc<'a>(&'a self, id: AllocId) -> DumpAllocs<'a, 'tcx, M> {
1088        self.dump_allocs(vec![id])
1089    }
1090
1091    /// Create a lazy debug printer for a list of allocations and all allocations they point to,
1092    /// recursively.
1093    #[must_use]
1094    pub fn dump_allocs<'a>(&'a self, mut allocs: Vec<AllocId>) -> DumpAllocs<'a, 'tcx, M> {
1095        allocs.sort();
1096        allocs.dedup();
1097        DumpAllocs { ecx: self, allocs }
1098    }
1099
1100    /// Print the allocation's bytes, without any nested allocations.
1101    pub fn print_alloc_bytes_for_diagnostics(&self, id: AllocId) -> String {
1102        // We use the "raw" access here to avoid the `before_alloc_read` hook: for diagnostics
1103        // we specifically want to be able to read all memory, even if that is cyclic.
1104        let alloc = self.get_alloc_raw(id).unwrap();
1105        let mut bytes = String::new();
1106        if alloc.size() != Size::ZERO {
1107            bytes = "\n".into();
1108            // FIXME(translation) there might be pieces that are translatable.
1109            rustc_middle::mir::pretty::write_allocation_bytes(*self.tcx, alloc, &mut bytes, "    ")
1110                .unwrap();
1111        }
1112        bytes
1113    }
1114
1115    /// Find leaked allocations, remove them from memory, and return them. Allocations reachable
1116    /// from `static_roots` or a `Global` allocation are not considered leaked, and neither are
1117    /// allocations whose kind's `may_leak()` returns true.
1118    ///
1119    /// This is highly destructive, no more execution can happen after this!
1120    pub fn take_leaked_allocations(
1121        &mut self,
1122        static_roots: impl FnOnce(&Self) -> &[AllocId],
1123    ) -> Vec<(AllocId, MemoryKind<M::MemoryKind>, Allocation<M::Provenance, M::AllocExtra, M::Bytes>)>
1124    {
1125        // Collect the set of allocations that are *reachable* from `Global` allocations.
1126        let reachable = {
1127            let mut reachable = FxHashSet::default();
1128            let global_kind = M::GLOBAL_KIND.map(MemoryKind::Machine);
1129            let mut todo: Vec<_> =
1130                self.memory.alloc_map.filter_map_collect(move |&id, &(kind, _)| {
1131                    if Some(kind) == global_kind { Some(id) } else { None }
1132                });
1133            todo.extend(static_roots(self));
1134            while let Some(id) = todo.pop() {
1135                if reachable.insert(id) {
1136                    // This is a new allocation, so push the allocations it points to onto `todo`.
1137                    // We only need to care about `alloc_map` memory here, as entirely unchanged
1138                    // global memory cannot point to memory relevant for the leak check.
1139                    if let Some((_, alloc)) = self.memory.alloc_map.get(id) {
1140                        todo.extend(
1141                            alloc.provenance().provenances().filter_map(|prov| prov.get_alloc_id()),
1142                        );
1143                    }
1144                }
1145            }
1146            reachable
1147        };
1148
1149        // All allocations that are *not* `reachable` and *not* `may_leak` are considered leaking.
1150        let leaked: Vec<_> = self.memory.alloc_map.filter_map_collect(|&id, &(kind, _)| {
1151            if kind.may_leak() || reachable.contains(&id) { None } else { Some(id) }
1152        });
1153        let mut result = Vec::new();
1154        for &id in leaked.iter() {
1155            let (kind, alloc) = self.memory.alloc_map.remove(&id).unwrap();
1156            result.push((id, kind, alloc));
1157        }
1158        result
1159    }
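// Editorial sketch (not in the original source): a hedged example of reporting leaks at the
// end of execution. No extra static roots are supplied in this sketch.
//
//     let leaks = ecx.take_leaked_allocations(|_| &[]);
//     for (id, kind, alloc) in leaks {
//         eprintln!("leaked allocation {id:?} ({kind}) of size {}", alloc.size().bytes());
//     }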
1160
1161    /// Runs the closure in "validation" mode, which means the machine's memory read hooks will be
1162    /// suppressed. Needless to say, this must only be set with great care! Cannot be nested.
1163    ///
1164    /// We do this so Miri's allocation access tracking does not show the validation
1165    /// reads as spurious accesses.
1166    pub fn run_for_validation_mut<R>(&mut self, f: impl FnOnce(&mut Self) -> R) -> R {
1167        // This deliberately uses `==` on `bool` to follow the pattern
1168        // `assert!(val.replace(new) == old)`.
1169        assert!(
1170            self.memory.validation_in_progress.replace(true) == false,
1171            "`validation_in_progress` was already set"
1172        );
1173        let res = f(self);
1174        assert!(
1175            self.memory.validation_in_progress.replace(false) == true,
1176            "`validation_in_progress` was unset by someone else"
1177        );
1178        res
1179    }
1180
1181    /// Runs the closure in "validation" mode, which means the machine's memory read hooks will be
1182    /// suppressed. Needless to say, this must only be set with great care! Cannot be nested.
1183    ///
1184    /// We do this so Miri's allocation access tracking does not show the validation
1185    /// reads as spurious accesses.
1186    pub fn run_for_validation_ref<R>(&self, f: impl FnOnce(&Self) -> R) -> R {
1187        // This deliberately uses `==` on `bool` to follow the pattern
1188        // `assert!(val.replace(new) == old)`.
1189        assert!(
1190            self.memory.validation_in_progress.replace(true) == false,
1191            "`validation_in_progress` was already set"
1192        );
1193        let res = f(self);
1194        assert!(
1195            self.memory.validation_in_progress.replace(false) == true,
1196            "`validation_in_progress` was unset by someone else"
1197        );
1198        res
1199    }
1200
1201    pub(super) fn validation_in_progress(&self) -> bool {
1202        self.memory.validation_in_progress.get()
1203    }
1204}
1205
1206#[doc(hidden)]
1207/// There's no way to use this directly; it's just a helper struct for the `dump_alloc(s)` methods.
1208pub struct DumpAllocs<'a, 'tcx, M: Machine<'tcx>> {
1209    ecx: &'a InterpCx<'tcx, M>,
1210    allocs: Vec<AllocId>,
1211}
1212
1213impl<'a, 'tcx, M: Machine<'tcx>> std::fmt::Debug for DumpAllocs<'a, 'tcx, M> {
1214    fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
1215        // Cannot be a closure because it is generic in `Prov`, `Extra`.
1216        fn write_allocation_track_relocs<'tcx, Prov: Provenance, Extra, Bytes: AllocBytes>(
1217            fmt: &mut std::fmt::Formatter<'_>,
1218            tcx: TyCtxt<'tcx>,
1219            allocs_to_print: &mut VecDeque<AllocId>,
1220            alloc: &Allocation<Prov, Extra, Bytes>,
1221        ) -> std::fmt::Result {
1222            for alloc_id in alloc.provenance().provenances().filter_map(|prov| prov.get_alloc_id())
1223            {
1224                allocs_to_print.push_back(alloc_id);
1225            }
1226            write!(fmt, "{}", display_allocation(tcx, alloc))
1227        }
1228
1229        let mut allocs_to_print: VecDeque<_> = self.allocs.iter().copied().collect();
1230        // `allocs_printed` contains all allocations that we have already printed.
1231        let mut allocs_printed = FxHashSet::default();
1232
1233        while let Some(id) = allocs_to_print.pop_front() {
1234            if !allocs_printed.insert(id) {
1235                // Already printed, so skip this.
1236                continue;
1237            }
1238
1239            write!(fmt, "{id:?}")?;
1240            match self.ecx.memory.alloc_map.get(id) {
1241                Some((kind, alloc)) => {
1242                    // normal alloc
1243                    write!(fmt, " ({kind}, ")?;
1244                    write_allocation_track_relocs(
1245                        &mut *fmt,
1246                        *self.ecx.tcx,
1247                        &mut allocs_to_print,
1248                        alloc,
1249                    )?;
1250                }
1251                None => {
1252                    // global alloc
1253                    match self.ecx.tcx.try_get_global_alloc(id) {
1254                        Some(GlobalAlloc::Memory(alloc)) => {
1255                            write!(fmt, " (unchanged global, ")?;
1256                            write_allocation_track_relocs(
1257                                &mut *fmt,
1258                                *self.ecx.tcx,
1259                                &mut allocs_to_print,
1260                                alloc.inner(),
1261                            )?;
1262                        }
1263                        Some(GlobalAlloc::Function { instance, .. }) => {
1264                            write!(fmt, " (fn: {instance})")?;
1265                        }
1266                        Some(GlobalAlloc::VTable(ty, dyn_ty)) => {
1267                            write!(fmt, " (vtable: impl {dyn_ty} for {ty})")?;
1268                        }
1269                        Some(GlobalAlloc::TypeId { ty }) => {
1270                            write!(fmt, " (typeid for {ty})")?;
1271                        }
1272                        Some(GlobalAlloc::Static(did)) => {
1273                            write!(fmt, " (static: {})", self.ecx.tcx.def_path_str(did))?;
1274                        }
1275                        None => {
1276                            write!(fmt, " (deallocated)")?;
1277                        }
1278                    }
1279                }
1280            }
1281            writeln!(fmt)?;
1282        }
1283        Ok(())
1284    }
1285}
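
// A rough sketch of how this helper is typically reached (assuming `ecx` and some
// `id: AllocId` are in scope): the `dump_alloc(s)` methods mentioned above wrap the
// id(s) in a `DumpAllocs`, and its `Debug` impl does the printing, e.g. via the
// `tracing` macros imported at the top of this file.
//
//     trace!("{:?}", ecx.dump_alloc(id));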
1286
1287/// Reading and writing.
1288impl<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes>
1289    AllocRefMut<'a, 'tcx, Prov, Extra, Bytes>
1290{
1291    pub fn as_ref<'b>(&'b self) -> AllocRef<'b, 'tcx, Prov, Extra, Bytes> {
1292        AllocRef { alloc: self.alloc, range: self.range, tcx: self.tcx, alloc_id: self.alloc_id }
1293    }
1294
1295    /// `range` is relative to this allocation reference, not the base of the allocation.
1296    pub fn write_scalar(&mut self, range: AllocRange, val: Scalar<Prov>) -> InterpResult<'tcx> {
1297        let range = self.range.subrange(range);
1298        debug!("write_scalar at {:?}{range:?}: {val:?}", self.alloc_id);
1299
1300        self.alloc
1301            .write_scalar(&self.tcx, range, val)
1302            .map_err(|e| e.to_interp_error(self.alloc_id))
1303            .into()
1304    }
1305
1306    /// `offset` is relative to this allocation reference, not the base of the allocation.
1307    pub fn write_ptr_sized(&mut self, offset: Size, val: Scalar<Prov>) -> InterpResult<'tcx> {
1308        self.write_scalar(alloc_range(offset, self.tcx.data_layout().pointer_size()), val)
1309    }
1310
1311    /// Mark the given sub-range (relative to this allocation reference) as uninitialized.
1312    pub fn write_uninit(&mut self, range: AllocRange) -> InterpResult<'tcx> {
1313        let range = self.range.subrange(range);
1314
1315        self.alloc
1316            .write_uninit(&self.tcx, range)
1317            .map_err(|e| e.to_interp_error(self.alloc_id))
1318            .into()
1319    }
1320
1321    /// Mark the entire referenced range as uninitialized.
1322    pub fn write_uninit_full(&mut self) -> InterpResult<'tcx> {
1323        self.alloc
1324            .write_uninit(&self.tcx, self.range)
1325            .map_err(|e| e.to_interp_error(self.alloc_id))
1326            .into()
1327    }
1328
1329    /// Remove all provenance in the referenced range.
1330    pub fn clear_provenance(&mut self) -> InterpResult<'tcx> {
1331        self.alloc
1332            .clear_provenance(&self.tcx, self.range)
1333            .map_err(|e| e.to_interp_error(self.alloc_id))
1334            .into()
1335    }
1336}
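
// A rough sketch of how a mutable allocation reference is typically obtained and
// used (assuming `ecx`, `ptr`, `size`, and some `val: Scalar<_>` are in scope):
// `get_ptr_alloc_mut` returns `None` for zero-sized accesses, hence the `if let`.
//
//     if let Some(mut alloc) = ecx.get_ptr_alloc_mut(ptr, size)? {
//         alloc.write_scalar(alloc_range(Size::ZERO, size), val)?;
//     }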
1337
1338impl<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes> AllocRef<'a, 'tcx, Prov, Extra, Bytes> {
1339    /// `range` is relative to this allocation reference, not the base of the allocation.
1340    pub fn read_scalar(
1341        &self,
1342        range: AllocRange,
1343        read_provenance: bool,
1344    ) -> InterpResult<'tcx, Scalar<Prov>> {
1345        let range = self.range.subrange(range);
1346        self.alloc
1347            .read_scalar(&self.tcx, range, read_provenance)
1348            .map_err(|e| e.to_interp_error(self.alloc_id))
1349            .into()
1350    }
1351
1352    /// `range` is relative to this allocation reference, not the base of the allocation.
1353    pub fn read_integer(&self, range: AllocRange) -> InterpResult<'tcx, Scalar<Prov>> {
1354        self.read_scalar(range, /*read_provenance*/ false)
1355    }
1356
1357    /// `offset` is relative to this allocation reference, not the base of the allocation.
1358    pub fn read_pointer(&self, offset: Size) -> InterpResult<'tcx, Scalar<Prov>> {
1359        self.read_scalar(
1360            alloc_range(offset, self.tcx.data_layout().pointer_size()),
1361            /*read_provenance*/ true,
1362        )
1363    }
1364
1365    /// Returns the raw bytes of the entire referenced range, with provenance stripped.
1366    pub fn get_bytes_strip_provenance<'b>(&'b self) -> InterpResult<'tcx, &'a [u8]> {
1367        self.alloc
1368            .get_bytes_strip_provenance(&self.tcx, self.range)
1369            .map_err(|e| e.to_interp_error(self.alloc_id))
1370            .into()
1371    }
1372
1373    /// Returns whether the allocation has provenance anywhere in the range of the `AllocRef`.
1374    pub fn has_provenance(&self) -> bool {
1375        !self.alloc.provenance().range_empty(self.range, &self.tcx)
1376    }
1377}
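
// The read-only counterpart, sketched roughly (assuming `ecx`, `ptr`, and `size` are
// in scope): obtain an `AllocRef` via `get_ptr_alloc`, then read a pointer-sized
// value from the start of the referenced range.
//
//     if let Some(alloc) = ecx.get_ptr_alloc(ptr, size)? {
//         let ptr_val = alloc.read_pointer(Size::ZERO)?;
//     }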
1378
1379impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
1380    /// Reads the given number of bytes from memory, and strips their provenance if possible.
1381    /// Returns them as a slice.
1382    ///
1383    /// Performs appropriate bounds checks.
1384    pub fn read_bytes_ptr_strip_provenance(
1385        &self,
1386        ptr: Pointer<Option<M::Provenance>>,
1387        size: Size,
1388    ) -> InterpResult<'tcx, &[u8]> {
1389        let Some(alloc_ref) = self.get_ptr_alloc(ptr, size)? else {
1390            // zero-sized access
1391            return interp_ok(&[]);
1392        };
1393        // Side-step AllocRef and directly access the underlying bytes more efficiently.
1394        // (We are staying inside the bounds here so all is good.)
1395        interp_ok(
1396            alloc_ref
1397                .alloc
1398                .get_bytes_strip_provenance(&alloc_ref.tcx, alloc_ref.range)
1399                .map_err(|e| e.to_interp_error(alloc_ref.alloc_id))?,
1400        )
1401    }
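
    // A rough sketch of a call (assuming `ptr` and a byte count `len` are in scope),
    // e.g. to read the contents of a string during const evaluation:
    //
    //     let bytes = ecx.read_bytes_ptr_strip_provenance(ptr, Size::from_bytes(len))?;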
1402
1403    /// Writes the given stream of bytes into memory.
1404    ///
1405    /// Performs appropriate bounds checks.
1406    pub fn write_bytes_ptr(
1407        &mut self,
1408        ptr: Pointer<Option<M::Provenance>>,
1409        src: impl IntoIterator<Item = u8>,
1410    ) -> InterpResult<'tcx> {
1411        let mut src = src.into_iter();
1412        let (lower, upper) = src.size_hint();
1413        let len = upper.expect("can only write bounded iterators");
1414        assert_eq!(lower, len, "can only write iterators with a precise length");
1415
1416        let size = Size::from_bytes(len);
1417        let Some(alloc_ref) = self.get_ptr_alloc_mut(ptr, size)? else {
1418            // zero-sized access
1419            assert_matches!(src.next(), None, "iterator said it was empty but returned an element");
1420            return interp_ok(());
1421        };
1422
1423        // Side-step AllocRef and directly access the underlying bytes more efficiently.
1424        // (We are staying inside the bounds here and all bytes do get overwritten so all is good.)
1425        let alloc_id = alloc_ref.alloc_id;
1426        let bytes = alloc_ref
1427            .alloc
1428            .get_bytes_unchecked_for_overwrite(&alloc_ref.tcx, alloc_ref.range)
1429            .map_err(move |e| e.to_interp_error(alloc_id))?;
1430        // `zip` would stop when the first iterator ends; we want to definitely
1431        // cover all of `bytes`.
1432        for dest in bytes {
1433            *dest = src.next().expect("iterator was shorter than it said it would be");
1434        }
1435        assert_matches!(src.next(), None, "iterator was longer than it said it would be");
1436        interp_ok(())
1437    }
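
    // A rough sketch of a valid call (assuming `ptr` is in scope): the iterator must
    // report its exact length via `size_hint`, which e.g. a `Vec<u8>` iterator does.
    //
    //     ecx.write_bytes_ptr(ptr, vec![0u8; 16])?;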
1438
1439    pub fn mem_copy(
1440        &mut self,
1441        src: Pointer<Option<M::Provenance>>,
1442        dest: Pointer<Option<M::Provenance>>,
1443        size: Size,
1444        nonoverlapping: bool,
1445    ) -> InterpResult<'tcx> {
1446        self.mem_copy_repeatedly(src, dest, size, 1, nonoverlapping)
1447    }
1448
1449    /// Performs `num_copies` many copies of `size` many bytes from `src` to `dest + i*size` (where
1450    /// `i` is the index of the copy).
1451    ///
1452    /// Either `nonoverlapping` must be true or `num_copies` must be 1; doing repeated copies that
1453    /// may overlap is not supported.
1454    pub fn mem_copy_repeatedly(
1455        &mut self,
1456        src: Pointer<Option<M::Provenance>>,
1457        dest: Pointer<Option<M::Provenance>>,
1458        size: Size,
1459        num_copies: u64,
1460        nonoverlapping: bool,
1461    ) -> InterpResult<'tcx> {
1462        let tcx = self.tcx;
1463        // We need to do our own bounds-checks.
1464        let src_parts = self.get_ptr_access(src, size)?;
1465        let dest_parts = self.get_ptr_access(dest, size * num_copies)?; // `Size` multiplication
1466
1467        // Similar to `get_ptr_alloc`, we need to call `before_alloc_access` even for zero-sized
1468        // reads. However, just like in `get_ptr_alloc_mut`, the write part is okay to skip for
1469        // zero-sized writes.
1470        if let Ok((alloc_id, ..)) = self.ptr_try_get_alloc_id(src, size.bytes().try_into().unwrap())
1471        {
1472            M::before_alloc_access(tcx, &self.machine, alloc_id)?;
1473        }
1474
1475        // FIXME: we look up both allocations twice here, once above for the `get_ptr_access`
1476        // and once below to get the underlying `&[mut] Allocation`.
1477
1478        // Source alloc preparations and access hooks.
1479        let Some((src_alloc_id, src_offset, src_prov)) = src_parts else {
1480            // Zero-sized *source*; that means `dest` is also zero-sized and we have nothing to do.
1481            return interp_ok(());
1482        };
1483        let src_alloc = self.get_alloc_raw(src_alloc_id)?;
1484        let src_range = alloc_range(src_offset, size);
1485        assert!(!self.memory.validation_in_progress.get(), "we can't be copying during validation");
1486
1487        // Trigger read hook.
1488        // For the overlapping case, it is crucial that we trigger the read hook
1489        // before the write hook -- the aliasing model cares about the order.
1490        M::before_memory_read(
1491            tcx,
1492            &self.machine,
1493            &src_alloc.extra,
1494            src,
1495            (src_alloc_id, src_prov),
1496            src_range,
1497        )?;
1498        // We need the `dest` ptr for the next operation, so we get it now.
1499        // We already did the source checks and called the hooks so we are good to return early.
1500        let Some((dest_alloc_id, dest_offset, dest_prov)) = dest_parts else {
1501            // Zero-sized *destination*.
1502            return interp_ok(());
1503        };
1504
1505        // Prepare getting source provenance.
1506        let src_bytes = src_alloc.get_bytes_unchecked(src_range).as_ptr(); // raw ptr, so we can also get a ptr to the destination allocation
1507        // First copy the provenance to a temporary buffer, because getting the destination
1508        // bytes for overwriting (below) will clear their provenance, which is correct since
1509        // we don't want to keep any provenance at the target.
1510        // This will also error if copying partial provenance is not supported.
1511        let provenance = src_alloc
1512            .provenance()
1513            .prepare_copy(src_range, dest_offset, num_copies, self)
1514            .map_err(|e| e.to_interp_error(src_alloc_id))?;
1515        // Prepare a copy of the initialization mask.
1516        let init = src_alloc.init_mask().prepare_copy(src_range);
1517
1518        // Destination alloc preparations...
1519        let (dest_alloc, machine) = self.get_alloc_raw_mut(dest_alloc_id)?;
1520        let dest_range = alloc_range(dest_offset, size * num_copies);
1521        // ...and access hooks.
1522        M::before_alloc_access(tcx, machine, dest_alloc_id)?;
1523        M::before_memory_write(
1524            tcx,
1525            machine,
1526            &mut dest_alloc.extra,
1527            dest,
1528            (dest_alloc_id, dest_prov),
1529            dest_range,
1530        )?;
1531        // Yes we do overwrite all bytes in `dest_bytes`.
1532        let dest_bytes = dest_alloc
1533            .get_bytes_unchecked_for_overwrite_ptr(&tcx, dest_range)
1534            .map_err(|e| e.to_interp_error(dest_alloc_id))?
1535            .as_mut_ptr();
1536
1537        if init.no_bytes_init() {
1538            // Fast path: if all bytes are `uninit`, then there is nothing to copy. The target range
1539            // is marked as uninitialized, but we otherwise avoid changing the byte representation,
1540            // which may be arbitrary for uninitialized bytes.
1541            // This also avoids writing to the target bytes so that the backing allocation is never
1542            // touched if the bytes stay uninitialized for the whole interpreter execution. On
1543            // contemporary operating systems, this can avoid physically allocating the page.
1544            dest_alloc
1545                .write_uninit(&tcx, dest_range)
1546                .map_err(|e| e.to_interp_error(dest_alloc_id))?;
1547            // `write_uninit` also resets the provenance, so we are done.
1548            return interp_ok(());
1549        }
1550
1551        // SAFETY: The bounds checks above ensure that there are at least `size` bytes
1552        // behind `src` and `dest`. Also, we use the overlapping-safe `ptr::copy` if `src` and
1553        // `dest` could possibly overlap.
1554        // The pointers above remain valid even if the `HashMap` table is moved around because they
1555        // point into the `Vec` storing the bytes.
1556        unsafe {
1557            if src_alloc_id == dest_alloc_id {
1558                if nonoverlapping {
1559                    // `Size` additions
1560                    if (src_offset <= dest_offset && src_offset + size > dest_offset)
1561                        || (dest_offset <= src_offset && dest_offset + size > src_offset)
1562                    {
1563                        throw_ub_custom!(fluent::const_eval_copy_nonoverlapping_overlapping);
1564                    }
1565                }
1566            }
1567            if num_copies > 1 {
1568                assert!(nonoverlapping, "multi-copy only supported in non-overlapping mode");
1569            }
1570
1571            let size_in_bytes = size.bytes_usize();
1572            // For particularly large arrays (where this is perf-sensitive) it's common that
1573            // we're writing a single byte repeatedly. So, optimize that case to a memset.
1574            if size_in_bytes == 1 {
1575                debug_assert!(num_copies >= 1); // we already handled the zero-sized cases above.
1576                // SAFETY: `src_bytes` would be read from anyway by `copy` below (num_copies >= 1).
1577                let value = *src_bytes;
1578                dest_bytes.write_bytes(value, (size * num_copies).bytes_usize());
1579            } else if src_alloc_id == dest_alloc_id {
1580                let mut dest_ptr = dest_bytes;
1581                for _ in 0..num_copies {
1582                    // Here we rely on `src` and `dest` being non-overlapping if there is more than
1583                    // one copy.
1584                    ptr::copy(src_bytes, dest_ptr, size_in_bytes);
1585                    dest_ptr = dest_ptr.add(size_in_bytes);
1586                }
1587            } else {
1588                let mut dest_ptr = dest_bytes;
1589                for _ in 0..num_copies {
1590                    ptr::copy_nonoverlapping(src_bytes, dest_ptr, size_in_bytes);
1591                    dest_ptr = dest_ptr.add(size_in_bytes);
1592                }
1593            }
1594        }
1595
1596        // now fill in all the "init" data
1597        dest_alloc.init_mask_apply_copy(
1598            init,
1599            alloc_range(dest_offset, size), // just a single copy (i.e., not full `dest_range`)
1600            num_copies,
1601        );
1602        // copy the provenance to the destination
1603        dest_alloc.provenance_apply_copy(provenance);
1604
1605        interp_ok(())
1606    }
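
    // A rough sketch of the repeat functionality (assuming `src`, `dest`, an element
    // size `elem_size`, and a count `n` are in scope), e.g. for initializing an array
    // from a single element: the `n` destination copies must not overlap.
    //
    //     ecx.mem_copy_repeatedly(src, dest, elem_size, n, /*nonoverlapping*/ true)?;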
1607}
1608
1609/// Machine pointer introspection.
1610impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
1611    /// Test if this value might be null.
1612    /// If the machine does not support ptr-to-int casts, this is conservative.
1613    pub fn scalar_may_be_null(&self, scalar: Scalar<M::Provenance>) -> InterpResult<'tcx, bool> {
1614        match scalar.try_to_scalar_int() {
1615            Ok(int) => interp_ok(int.is_null()),
1616            Err(_) => {
1617                // We can't cast this pointer to an integer. Can only happen during CTFE.
1618                let ptr = scalar.to_pointer(self)?;
1619                match self.ptr_try_get_alloc_id(ptr, 0) {
1620                    Ok((alloc_id, offset, _)) => {
1621                        let info = self.get_alloc_info(alloc_id);
1622                        // If the pointer is in-bounds (including "at the end"), it is definitely not null.
1623                        if offset <= info.size {
1624                            return interp_ok(false);
1625                        }
1626                        // If the allocation is N-aligned, and the offset is not divisible by N,
1627                        // then `base + offset` has a non-zero remainder after division by `N`,
1628                        // which means `base + offset` cannot be null.
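                        // For example, with `align == 8` the base address is a multiple
                        // of 8, so `base + 3` is congruent to 3 modulo 8 and thus nonzero.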
1629                        if !offset.bytes().is_multiple_of(info.align.bytes()) {
1630                            return interp_ok(false);
1631                        }
1632                        // We don't know enough; this might be null.
1633                        interp_ok(true)
1634                    }
1635                    Err(_offset) => bug!("a non-int scalar is always a pointer"),
1636                }
1637            }
1638        }
1639    }
1640
1641    /// Turning a "maybe pointer" into a proper pointer (and some information
1642    /// about where it points), or an absolute address.
1643    ///
1644    /// `size` says how many bytes of memory are expected at that pointer. This is largely only used
1645    /// for error messages; however, the *sign* of `size` can be used to disambiguate situations
1646    /// where a wildcard pointer sits right in between two allocations.
1647    /// It is almost always okay to just set the size to 0; this will be treated like a positive size
1648    /// for handling wildcard pointers.
1649    ///
1650    /// The result must be used immediately; it is not allowed to convert
1651    /// the returned data back into a `Pointer` and store that in machine state.
1652    /// (In fact that's not even possible since `M::ProvenanceExtra` is generic and
1653    /// we don't have an operation to turn it back into `M::Provenance`.)
1654    pub fn ptr_try_get_alloc_id(
1655        &self,
1656        ptr: Pointer<Option<M::Provenance>>,
1657        size: i64,
1658    ) -> Result<(AllocId, Size, M::ProvenanceExtra), u64> {
1659        match ptr.into_pointer_or_addr() {
1660            Ok(ptr) => match M::ptr_get_alloc(self, ptr, size) {
1661                Some((alloc_id, offset, extra)) => Ok((alloc_id, offset, extra)),
1662                None => {
1663                    assert!(M::Provenance::OFFSET_IS_ADDR);
1664                    // Offset is absolute, as we just asserted.
1665                    let (_, addr) = ptr.into_raw_parts();
1666                    Err(addr.bytes())
1667                }
1668            },
1669            Err(addr) => Err(addr.bytes()),
1670        }
1671    }
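
    // The usual calling pattern, roughly (assuming `ptr` is in scope): `Ok` identifies
    // the allocation the pointer belongs to, `Err` yields the absolute address of a
    // pointer that is just an integer.
    //
    //     match ecx.ptr_try_get_alloc_id(ptr, 0) {
    //         Ok((alloc_id, offset, _prov)) => { /* points into `alloc_id` at `offset` */ }
    //         Err(addr) => { /* no provenance; `addr` is the absolute address in bytes */ }
    //     }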
1672
1673    /// Turning a "maybe pointer" into a proper pointer (and some information about where it points).
1674    ///
1675    /// `size` says how many bytes of memory are expected at that pointer. This is largely only used
1676    /// for error messages; however, the *sign* of `size` can be used to disambiguate situations
1677    /// where a wildcard pointer sits right in between two allocations.
1678    /// It is almost always okay to just set the size to 0; this will be treated like a positive size
1679    /// for handling wildcard pointers.
1680    ///
1681    /// The result must be used immediately; it is not allowed to convert
1682    /// the returned data back into a `Pointer` and store that in machine state.
1683    /// (In fact that's not even possible since `M::ProvenanceExtra` is generic and
1684    /// we don't have an operation to turn it back into `M::Provenance`.)
1685    #[inline(always)]
1686    pub fn ptr_get_alloc_id(
1687        &self,
1688        ptr: Pointer<Option<M::Provenance>>,
1689        size: i64,
1690    ) -> InterpResult<'tcx, (AllocId, Size, M::ProvenanceExtra)> {
1691        self.ptr_try_get_alloc_id(ptr, size)
1692            .map_err(|offset| {
1693                err_ub!(DanglingIntPointer {
1694                    addr: offset,
1695                    inbounds_size: size,
1696                    msg: CheckInAllocMsg::Dereferenceable
1697                })
1698            })
1699            .into()
1700    }
1701}