//! Global machine state as well as implementation of the interpreter engine
//! `Machine` trait.

use std::any::Any;
use std::borrow::Cow;
use std::cell::{Cell, RefCell};
use std::collections::hash_map::Entry;
use std::path::Path;
use std::{fmt, process};

use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};
use rustc_abi::{Align, ExternAbi, Size};
use rustc_apfloat::{Float, FloatConvert};
use rustc_attr_parsing::InlineAttr;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
#[allow(unused)]
use rustc_data_structures::static_assert_size;
use rustc_middle::mir;
use rustc_middle::query::TyCtxtAt;
use rustc_middle::ty::layout::{
    HasTyCtxt, HasTypingEnv, LayoutCx, LayoutError, LayoutOf, TyAndLayout,
};
use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
use rustc_session::config::InliningThreshold;
use rustc_span::def_id::{CrateNum, DefId};
use rustc_span::{Span, SpanData, Symbol};
use rustc_target::callconv::FnAbi;

use crate::concurrency::cpu_affinity::{self, CpuAffinityMask};
use crate::concurrency::data_race::{self, NaReadType, NaWriteType};
use crate::concurrency::weak_memory;
use crate::*;

/// First real-time signal.
/// `signal(7)` says this must be between 32 and 64 and specifies 34 or 35
/// as typical values.
pub const SIGRTMIN: i32 = 34;

/// Last real-time signal.
/// `signal(7)` says it must be between 32 and 64 and specifies
/// `SIGRTMAX` - `SIGRTMIN` >= 8 (which is the value of `_POSIX_RTSIG_MAX`)
pub const SIGRTMAX: i32 = 42;
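// Note: with the values chosen above, `SIGRTMAX - SIGRTMIN == 8`, which is exactly the
// `_POSIX_RTSIG_MAX` minimum that `signal(7)` requires.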

/// Each anonymous global (constant, vtable, function pointer, ...) has multiple addresses, but only
/// this many. Since const allocations are never deallocated, choosing a new [`AllocId`] and thus
/// base address for each evaluation would produce unbounded memory usage.
const ADDRS_PER_ANON_GLOBAL: usize = 32;

/// Extra data stored with each stack frame
pub struct FrameExtra<'tcx> {
    /// Extra data for the Borrow Tracker.
    pub borrow_tracker: Option<borrow_tracker::FrameState>,

    /// If this is `Some`, then this is a special "catch unwind" frame (the frame of `try_fn`
    /// called by `try`). When this frame is popped during unwinding a panic,
    /// we stop unwinding and use the `CatchUnwindData` to handle catching.
    pub catch_unwind: Option<CatchUnwindData<'tcx>>,

    /// If `measureme` profiling is enabled, holds timing information
    /// for the start of this frame. When we finish executing this frame,
    /// we use this to register a completed event with `measureme`.
    pub timing: Option<measureme::DetachedTiming>,

    /// Indicates whether a `Frame` is part of a workspace-local crate and is also not
    /// `#[track_caller]`. We compute this once on creation and store the result, as an
    /// optimization.
    /// This is used by `MiriMachine::current_span` and `MiriMachine::caller_span`.
    pub is_user_relevant: bool,

    /// We have a cache for the mapping from [`mir::Const`] to resulting [`AllocId`].
    /// However, we don't want all frames to always get the same result, so we insert
    /// an additional bit of "salt" into the cache key. This salt is fixed per-frame
    /// so that within a call, a const will have a stable address.
    salt: usize,

    /// Data race detector per-frame data.
    pub data_race: Option<data_race::FrameState>,
}

impl<'tcx> std::fmt::Debug for FrameExtra<'tcx> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Omitting `timing` since it does not support `Debug`.
        let FrameExtra {
            borrow_tracker,
            catch_unwind,
            timing: _,
            is_user_relevant,
            salt,
            data_race,
        } = self;
        f.debug_struct("FrameData")
            .field("borrow_tracker", borrow_tracker)
            .field("catch_unwind", catch_unwind)
            .field("is_user_relevant", is_user_relevant)
            .field("salt", salt)
            .field("data_race", data_race)
            .finish()
    }
}

impl VisitProvenance for FrameExtra<'_> {
    fn visit_provenance(&self, visit: &mut VisitWith<'_>) {
        let FrameExtra {
            catch_unwind,
            borrow_tracker,
            timing: _,
            is_user_relevant: _,
            salt: _,
            data_race: _,
        } = self;

        catch_unwind.visit_provenance(visit);
        borrow_tracker.visit_provenance(visit);
    }
}

/// Extra memory kinds
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum MiriMemoryKind {
    /// `__rust_alloc` memory.
    Rust,
    /// `miri_alloc` memory.
    Miri,
    /// `malloc` memory.
    C,
    /// Windows `HeapAlloc` memory.
    WinHeap,
    /// Windows "local" memory (to be freed with `LocalFree`).
    WinLocal,
    /// Memory for args, errno, and other parts of the machine-managed environment.
    /// This memory may leak.
    Machine,
    /// Memory allocated by the runtime (e.g. env vars). Separate from `Machine`
    /// because we clean it up and leak-check it.
    Runtime,
    /// Globals copied from `tcx`.
    /// This memory may leak.
    Global,
    /// Memory for extern statics.
    /// This memory may leak.
    ExternStatic,
    /// Memory for thread-local statics.
    /// This memory may leak.
    Tls,
    /// Memory mapped directly by the program.
    Mmap,
}

impl From<MiriMemoryKind> for MemoryKind {
    #[inline(always)]
    fn from(kind: MiriMemoryKind) -> MemoryKind {
        MemoryKind::Machine(kind)
    }
}

impl MayLeak for MiriMemoryKind {
    #[inline(always)]
    fn may_leak(self) -> bool {
        use self::MiriMemoryKind::*;
        match self {
            Rust | Miri | C | WinHeap | WinLocal | Runtime => false,
            Machine | Global | ExternStatic | Tls | Mmap => true,
        }
    }
}

impl MiriMemoryKind {
    /// Whether we have a useful allocation span for an allocation of this kind.
    fn should_save_allocation_span(self) -> bool {
        use self::MiriMemoryKind::*;
        match self {
            // Heap allocations are fine since the `Allocation` is created immediately.
            Rust | Miri | C | WinHeap | WinLocal | Mmap => true,
            // Everything else is unclear, let's not show potentially confusing spans.
            Machine | Global | ExternStatic | Tls | Runtime => false,
        }
    }
}

impl fmt::Display for MiriMemoryKind {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        use self::MiriMemoryKind::*;
        match self {
            Rust => write!(f, "Rust heap"),
            Miri => write!(f, "Miri bare-metal heap"),
            C => write!(f, "C heap"),
            WinHeap => write!(f, "Windows heap"),
            WinLocal => write!(f, "Windows local memory"),
            Machine => write!(f, "machine-managed memory"),
            Runtime => write!(f, "language runtime memory"),
            Global => write!(f, "global (static or const)"),
            ExternStatic => write!(f, "extern static"),
            Tls => write!(f, "thread-local static"),
            Mmap => write!(f, "mmap"),
        }
    }
}

pub type MemoryKind = interpret::MemoryKind<MiriMemoryKind>;

/// Pointer provenance.
// This needs to be `Eq`+`Hash` because the `Machine` trait needs that because validity checking
// *might* be recursive and then it has to track which places have already been visited.
// These implementations are a bit questionable, and it means we may check the same place multiple
// times with different provenance, but that is in general not wrong.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
pub enum Provenance {
    /// For pointers with concrete provenance: we know exactly which allocation they are attached to
    /// and what their borrow tag is.
    Concrete {
        alloc_id: AllocId,
        /// Borrow Tracker tag.
        tag: BorTag,
    },
    /// Pointers with wildcard provenance are created on int-to-ptr casts. According to the
    /// specification, we should at that point angelically "guess" a provenance that will make all
    /// future uses of this pointer work, if at all possible. Of course such a semantics cannot be
    /// actually implemented in Miri. So instead, we approximate this, erring on the side of
    /// accepting too much code rather than rejecting correct code: a pointer with wildcard
    /// provenance "acts like" any previously exposed pointer. Each time it is used, we check
    /// whether *some* exposed pointer could have done what we want to do, and if the answer is yes
    /// then we allow the access. This allows too much code in two ways:
    /// - The same wildcard pointer can "take the role" of multiple different exposed pointers on
    ///   subsequent memory accesses.
    /// - In the aliasing model, we don't just have to know the borrow tag of the pointer used for
    ///   the access, we also have to update the aliasing state -- and that update can be very
    ///   different depending on which borrow tag we pick! Stacked Borrows has support for this by
    ///   switching to a stack that is only approximately known, i.e. we over-approximate the effect
    ///   of using *any* exposed pointer for this access, and only keep information about the borrow
    ///   stack that would be true with all possible choices.
    Wildcard,
}
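
// For illustration: an integer-to-pointer cast like `addr as *const u8` (or
// `std::ptr::with_exposed_provenance`) has no single allocation it can be attached to, so the
// resulting pointer gets `Provenance::Wildcard`, and each use of it is checked against the set of
// previously exposed pointers as described above.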

/// The "extra" information a pointer has over a regular AllocId.
#[derive(Copy, Clone, PartialEq)]
pub enum ProvenanceExtra {
    Concrete(BorTag),
    Wildcard,
}

#[cfg(target_pointer_width = "64")]
static_assert_size!(StrictPointer, 24);
// FIXME: this would fit in 24 bytes but layout optimizations are not smart enough
// #[cfg(target_pointer_width = "64")]
//static_assert_size!(Pointer, 24);
#[cfg(target_pointer_width = "64")]
static_assert_size!(Scalar, 32);

impl fmt::Debug for Provenance {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Provenance::Concrete { alloc_id, tag } => {
                // Forward `alternate` flag to `alloc_id` printing.
                if f.alternate() {
                    write!(f, "[{alloc_id:#?}]")?;
                } else {
                    write!(f, "[{alloc_id:?}]")?;
                }
                // Print Borrow Tracker tag.
                write!(f, "{tag:?}")?;
            }
            Provenance::Wildcard => {
                write!(f, "[wildcard]")?;
            }
        }
        Ok(())
    }
}

impl interpret::Provenance for Provenance {
    /// We use absolute addresses in the `offset` of a `StrictPointer`.
    const OFFSET_IS_ADDR: bool = true;

    /// Miri implements wildcard provenance.
    const WILDCARD: Option<Self> = Some(Provenance::Wildcard);

    fn get_alloc_id(self) -> Option<AllocId> {
        match self {
            Provenance::Concrete { alloc_id, .. } => Some(alloc_id),
            Provenance::Wildcard => None,
        }
    }

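    /// Print the absolute address, then the provenance. With the `Debug` impl above, a concrete
    /// pointer renders along the lines of `0x2a[alloc17]<1234>` (address, allocation, borrow tag;
    /// the concrete values here are illustrative).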
    fn fmt(ptr: &interpret::Pointer<Self>, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let (prov, addr) = ptr.into_parts(); // address is absolute
        write!(f, "{:#x}", addr.bytes())?;
        if f.alternate() {
            write!(f, "{prov:#?}")?;
        } else {
            write!(f, "{prov:?}")?;
        }
        Ok(())
    }

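    /// A sketch of the cases below: two identical concrete provenances join to that provenance;
    /// a wildcard joined with anything yields the other side (the wildcard "could have been" that
    /// pointer); any other combination loses provenance.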
    fn join(left: Option<Self>, right: Option<Self>) -> Option<Self> {
        match (left, right) {
            // If both are the *same* concrete tag, that is the result.
            (
                Some(Provenance::Concrete { alloc_id: left_alloc, tag: left_tag }),
                Some(Provenance::Concrete { alloc_id: right_alloc, tag: right_tag }),
            ) if left_alloc == right_alloc && left_tag == right_tag => left,
            // If one side is a wildcard, the best possible outcome is that it is equal to the other
            // one, and we use that.
            (Some(Provenance::Wildcard), o) | (o, Some(Provenance::Wildcard)) => o,
            // Otherwise, fall back to `None`.
            _ => None,
        }
    }
}

impl fmt::Debug for ProvenanceExtra {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            ProvenanceExtra::Concrete(pid) => write!(f, "{pid:?}"),
            ProvenanceExtra::Wildcard => write!(f, "<wildcard>"),
        }
    }
}

impl ProvenanceExtra {
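    /// Apply `f` to the borrow tag, if there is a concrete one; wildcard provenance yields
    /// `None`. A minimal sketch of the behavior (not a doctest, since these types are
    /// crate-internal):
    /// ```ignore
    /// assert_eq!(ProvenanceExtra::Wildcard.and_then(|tag| Some(tag)), None);
    /// ```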
    pub fn and_then<T>(self, f: impl FnOnce(BorTag) -> Option<T>) -> Option<T> {
        match self {
            ProvenanceExtra::Concrete(pid) => f(pid),
            ProvenanceExtra::Wildcard => None,
        }
    }
}

/// Extra per-allocation data
#[derive(Debug)]
pub struct AllocExtra<'tcx> {
    /// Borrow tracker state for this allocation, if the borrow tracker is enabled.
    pub borrow_tracker: Option<borrow_tracker::AllocState>,
    /// Data race detection via the use of a vector-clock.
    /// This is only added if it is enabled.
    pub data_race: Option<data_race::AllocState>,
    /// Weak memory emulation via the use of store buffers.
    /// This is only added if it is enabled.
    pub weak_memory: Option<weak_memory::AllocState>,
    /// A backtrace to where this allocation was allocated.
    /// As this is recorded for leak reports, it only exists
    /// if this allocation is leakable. The backtrace is not
    /// pruned yet; that should be done before printing it.
    pub backtrace: Option<Vec<FrameInfo<'tcx>>>,
    /// Synchronization primitives like to attach extra data to particular addresses. We store that
    /// inside the relevant allocation, to ensure that everything is removed when the allocation is
    /// freed.
    /// This maps offsets to synchronization-primitive-specific data.
    pub sync: FxHashMap<Size, Box<dyn Any>>,
}

// We need a `Clone` impl because the machine passes `Allocation` through `Cow`...
// but that should never end up actually cloning our `AllocExtra`.
impl<'tcx> Clone for AllocExtra<'tcx> {
    fn clone(&self) -> Self {
        panic!("our allocations should never be cloned");
    }
}

impl VisitProvenance for AllocExtra<'_> {
    fn visit_provenance(&self, visit: &mut VisitWith<'_>) {
        let AllocExtra { borrow_tracker, data_race, weak_memory, backtrace: _, sync: _ } = self;

        borrow_tracker.visit_provenance(visit);
        data_race.visit_provenance(visit);
        weak_memory.visit_provenance(visit);
    }
}

/// Precomputed layouts of primitive types
pub struct PrimitiveLayouts<'tcx> {
    pub unit: TyAndLayout<'tcx>,
    pub i8: TyAndLayout<'tcx>,
    pub i16: TyAndLayout<'tcx>,
    pub i32: TyAndLayout<'tcx>,
    pub i64: TyAndLayout<'tcx>,
    pub i128: TyAndLayout<'tcx>,
    pub isize: TyAndLayout<'tcx>,
    pub u8: TyAndLayout<'tcx>,
    pub u16: TyAndLayout<'tcx>,
    pub u32: TyAndLayout<'tcx>,
    pub u64: TyAndLayout<'tcx>,
    pub u128: TyAndLayout<'tcx>,
    pub usize: TyAndLayout<'tcx>,
    pub bool: TyAndLayout<'tcx>,
    pub mut_raw_ptr: TyAndLayout<'tcx>,   // *mut ()
    pub const_raw_ptr: TyAndLayout<'tcx>, // *const ()
}

impl<'tcx> PrimitiveLayouts<'tcx> {
    fn new(layout_cx: LayoutCx<'tcx>) -> Result<Self, &'tcx LayoutError<'tcx>> {
        let tcx = layout_cx.tcx();
        let mut_raw_ptr = Ty::new_mut_ptr(tcx, tcx.types.unit);
        let const_raw_ptr = Ty::new_imm_ptr(tcx, tcx.types.unit);
        Ok(Self {
            unit: layout_cx.layout_of(tcx.types.unit)?,
            i8: layout_cx.layout_of(tcx.types.i8)?,
            i16: layout_cx.layout_of(tcx.types.i16)?,
            i32: layout_cx.layout_of(tcx.types.i32)?,
            i64: layout_cx.layout_of(tcx.types.i64)?,
            i128: layout_cx.layout_of(tcx.types.i128)?,
            isize: layout_cx.layout_of(tcx.types.isize)?,
            u8: layout_cx.layout_of(tcx.types.u8)?,
            u16: layout_cx.layout_of(tcx.types.u16)?,
            u32: layout_cx.layout_of(tcx.types.u32)?,
            u64: layout_cx.layout_of(tcx.types.u64)?,
            u128: layout_cx.layout_of(tcx.types.u128)?,
            usize: layout_cx.layout_of(tcx.types.usize)?,
            bool: layout_cx.layout_of(tcx.types.bool)?,
            mut_raw_ptr: layout_cx.layout_of(mut_raw_ptr)?,
            const_raw_ptr: layout_cx.layout_of(const_raw_ptr)?,
        })
    }

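    /// Look up the precomputed layout of an unsigned integer with the given size. For example,
    /// `Size::from_bits(32)` yields the layout of `u32`; sizes without a matching primitive
    /// return `None`.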
    pub fn uint(&self, size: Size) -> Option<TyAndLayout<'tcx>> {
        match size.bits() {
            8 => Some(self.u8),
            16 => Some(self.u16),
            32 => Some(self.u32),
            64 => Some(self.u64),
            128 => Some(self.u128),
            _ => None,
        }
    }

    pub fn int(&self, size: Size) -> Option<TyAndLayout<'tcx>> {
        match size.bits() {
            8 => Some(self.i8),
            16 => Some(self.i16),
            32 => Some(self.i32),
            64 => Some(self.i64),
            128 => Some(self.i128),
            _ => None,
        }
    }
}

/// The machine itself.
///
/// If you add anything here that stores machine values, remember to update
/// `visit_all_machine_values`!
pub struct MiriMachine<'tcx> {
    // We carry a copy of the global `TyCtxt` for convenience, so methods taking just `&Evaluator` have `tcx` access.
    pub tcx: TyCtxt<'tcx>,

    /// Global data for borrow tracking.
    pub borrow_tracker: Option<borrow_tracker::GlobalState>,

    /// Data race detector global data.
    pub data_race: Option<data_race::GlobalState>,

    /// Ptr-int-cast module global data.
    pub alloc_addresses: alloc_addresses::GlobalState,

    /// Environment variables.
    pub(crate) env_vars: EnvVars<'tcx>,

    /// Return place of the main function.
    pub(crate) main_fn_ret_place: Option<MPlaceTy<'tcx>>,

    /// Program arguments (`Option` because we can only initialize them after creating the ecx).
    /// These are *pointers* to argc/argv because macOS.
    /// We also need the full command line as one string because of Windows.
    pub(crate) argc: Option<Pointer>,
    pub(crate) argv: Option<Pointer>,
    pub(crate) cmd_line: Option<Pointer>,

    /// TLS state.
    pub(crate) tls: TlsData<'tcx>,

    /// What should Miri do when an op requires communicating with the host,
    /// such as accessing host env vars, random number generation, and
    /// file system access.
    pub(crate) isolated_op: IsolatedOp,

    /// Whether to enforce the validity invariant.
    pub(crate) validation: ValidationMode,

    /// The table of file descriptors.
    pub(crate) fds: shims::FdTable,
    /// The table of directory descriptors.
    pub(crate) dirs: shims::DirTable,

    /// The list of all EpollEventInterest.
    pub(crate) epoll_interests: shims::EpollInterestTable,

    /// This machine's monotone clock.
    pub(crate) clock: Clock,

    /// The set of threads.
    pub(crate) threads: ThreadManager<'tcx>,

    /// Stores which thread is eligible to run on which CPUs.
    /// This has no effect at all; it is just tracked to produce the correct result
    /// in `sched_getaffinity`.
    pub(crate) thread_cpu_affinity: FxHashMap<ThreadId, CpuAffinityMask>,

    /// The state of the primitive synchronization objects.
    pub(crate) sync: SynchronizationObjects,

    /// Precomputed `TyLayout`s for primitive data types that are commonly used inside Miri.
    pub(crate) layouts: PrimitiveLayouts<'tcx>,

    /// Allocations that are considered roots of static memory (that may leak).
    pub(crate) static_roots: Vec<AllocId>,

    /// The `measureme` profiler used to record timing information about
    /// the emulated program.
    profiler: Option<measureme::Profiler>,
    /// Used with `profiler` to cache the `StringId`s for event names
    /// used with `measureme`.
    string_cache: FxHashMap<String, measureme::StringId>,

    /// Cache of `Instance` exported under the given `Symbol` name.
    /// `None` means no `Instance` exported under the given name is found.
    pub(crate) exported_symbols_cache: FxHashMap<Symbol, Option<Instance<'tcx>>>,

    /// Equivalent setting to RUST_BACKTRACE on encountering an error.
    pub(crate) backtrace_style: BacktraceStyle,

    /// Crates which are considered local for the purposes of error reporting.
    pub(crate) local_crates: Vec<CrateNum>,

    /// Mapping extern static names to their pointer.
    extern_statics: FxHashMap<Symbol, StrictPointer>,

    /// The random number generator used for resolving non-determinism.
    /// Needs to be queried by ptr_to_int, hence needs interior mutability.
    pub(crate) rng: RefCell<StdRng>,

    /// The allocation IDs to report when they are being allocated
    /// (helps for debugging memory leaks and use after free bugs).
    tracked_alloc_ids: FxHashSet<AllocId>,
    /// For the tracked alloc ids, also report read/write accesses.
    track_alloc_accesses: bool,

    /// Controls whether alignment of memory accesses is being checked.
    pub(crate) check_alignment: AlignmentCheck,

    /// Failure rate of compare_exchange_weak, between 0.0 and 1.0.
    pub(crate) cmpxchg_weak_failure_rate: f64,

    /// Corresponds to -Zmiri-mute-stdout-stderr and doesn't write the output but acts as if it succeeded.
    pub(crate) mute_stdout_stderr: bool,

    /// Whether weak memory emulation is enabled.
    pub(crate) weak_memory: bool,

    /// The probability of the active thread being preempted at the end of each basic block.
    pub(crate) preemption_rate: f64,

    /// If `Some`, we will report the current stack every N basic blocks.
    pub(crate) report_progress: Option<u32>,
    /// The total number of blocks that have been executed.
    pub(crate) basic_block_count: u64,

    /// Handle of the optional shared object file for native functions.
    #[cfg(unix)]
    pub native_lib: Option<(libloading::Library, std::path::PathBuf)>,
    #[cfg(not(unix))]
    pub native_lib: Option<!>,

    /// Run a garbage collector for BorTags every N basic blocks.
    pub(crate) gc_interval: u32,
    /// The number of blocks that passed since the last BorTag GC pass.
    pub(crate) since_gc: u32,

    /// The number of CPUs to be reported by Miri.
    pub(crate) num_cpus: u32,

    /// Determines Miri's page size and associated values.
    pub(crate) page_size: u64,
    pub(crate) stack_addr: u64,
    pub(crate) stack_size: u64,

    /// Whether to collect a backtrace when each allocation is created, just in case it leaks.
    pub(crate) collect_leak_backtraces: bool,

    /// The spans we will use to report where an allocation was created and deallocated in
    /// diagnostics.
    pub(crate) allocation_spans: RefCell<FxHashMap<AllocId, (Span, Option<Span>)>>,

    /// Maps MIR consts to their evaluated result. We combine the const with a "salt" (`usize`)
    /// that is fixed per stack frame; this lets us have sometimes different results for the
    /// same const while ensuring consistent results within a single call.
    const_cache: RefCell<FxHashMap<(mir::Const<'tcx>, usize), OpTy<'tcx>>>,

    /// For each allocation, an offset inside that allocation that was deemed aligned even for
    /// symbolic alignment checks. This cannot be stored in `AllocExtra` since it needs to be
    /// tracked for vtables and function allocations as well as regular allocations.
    ///
    /// Invariant: the promised alignment will never be less than the native alignment of the
    /// allocation.
    pub(crate) symbolic_alignment: RefCell<FxHashMap<AllocId, (Size, Align)>>,

    /// A cache of "data range" computations for unions (i.e., the offsets of non-padding bytes).
    union_data_ranges: FxHashMap<Ty<'tcx>, RangeSet>,

    /// Caches the sanity-checks for various pthread primitives.
    pub(crate) pthread_mutex_sanity: Cell<bool>,
    pub(crate) pthread_rwlock_sanity: Cell<bool>,
    pub(crate) pthread_condvar_sanity: Cell<bool>,

    /// Remembers whether we already warned about an extern type with Stacked Borrows.
    pub(crate) sb_extern_type_warned: Cell<bool>,
    /// Remembers whether we already warned about sharing memory with a native call.
    #[cfg(unix)]
    pub(crate) native_call_mem_warned: Cell<bool>,
    /// Remembers which shims have already shown the warning about erroring in isolation.
    pub(crate) reject_in_isolation_warned: RefCell<FxHashSet<String>>,
    /// Remembers which int2ptr casts we have already warned about.
    pub(crate) int2ptr_warned: RefCell<FxHashSet<Span>>,

    /// Cache for `mangle_internal_symbol`.
    pub(crate) mangle_internal_symbol_cache: FxHashMap<&'static str, String>,
}

impl<'tcx> MiriMachine<'tcx> {
    pub(crate) fn new(config: &MiriConfig, layout_cx: LayoutCx<'tcx>) -> Self {
        let tcx = layout_cx.tcx();
        let local_crates = helpers::get_local_crates(tcx);
        let layouts =
            PrimitiveLayouts::new(layout_cx).expect("Couldn't get layouts of primitive types");
        let profiler = config.measureme_out.as_ref().map(|out| {
            let crate_name =
                tcx.sess.opts.crate_name.clone().unwrap_or_else(|| "unknown-crate".to_string());
            let pid = process::id();
            // We adopt the same naming scheme for the profiler output that rustc uses. In rustc,
            // the PID is padded so that the nondeterministic value of the PID does not spread
            // nondeterminism to the allocator. In Miri we are not aiming for such performance
            // control, we just pad for consistency with rustc.
            let filename = format!("{crate_name}-{pid:07}");
            let path = Path::new(out).join(filename);
            measureme::Profiler::new(path).expect("Couldn't create `measureme` profiler")
        });
        let rng = StdRng::seed_from_u64(config.seed.unwrap_or(0));
        let borrow_tracker = config.borrow_tracker.map(|bt| bt.instantiate_global_state(config));
        let data_race = config.data_race_detector.then(|| data_race::GlobalState::new(config));
        // Determine page size, stack address, and stack size.
        // These values are mostly meaningless, but the stack address is also where we start
        // allocating physical integer addresses for all allocations.
        let page_size = if let Some(page_size) = config.page_size {
            page_size
        } else {
            let target = &tcx.sess.target;
            match target.arch.as_ref() {
                "wasm32" | "wasm64" => 64 * 1024, // https://webassembly.github.io/spec/core/exec/runtime.html#memory-instances
                "aarch64" => {
                    if target.options.vendor.as_ref() == "apple" {
                        // No "definitive" source, but see:
                        // https://www.wwdcnotes.com/notes/wwdc20/10214/
                        // https://github.com/ziglang/zig/issues/11308 etc.
                        16 * 1024
                    } else {
                        4 * 1024
                    }
                }
                _ => 4 * 1024,
            }
        };
        // On 16-bit targets, 32 pages is more than the entire address space!
        let stack_addr = if tcx.pointer_size().bits() < 32 { page_size } else { page_size * 32 };
        let stack_size =
            if tcx.pointer_size().bits() < 32 { page_size * 4 } else { page_size * 16 };
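        // For illustration: with the common 4 KiB page size on a target with at least 32-bit
        // pointers, this places the stack base at 128 KiB and makes the stack 64 KiB large.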
        assert!(
            usize::try_from(config.num_cpus).unwrap() <= cpu_affinity::MAX_CPUS,
            "miri only supports up to {} CPUs, but {} were configured",
            cpu_affinity::MAX_CPUS,
            config.num_cpus
        );
        let threads = ThreadManager::default();
        let mut thread_cpu_affinity = FxHashMap::default();
        if matches!(&*tcx.sess.target.os, "linux" | "freebsd" | "android") {
            thread_cpu_affinity
                .insert(threads.active_thread(), CpuAffinityMask::new(&layout_cx, config.num_cpus));
        }
        MiriMachine {
            tcx,
            borrow_tracker,
            data_race,
            alloc_addresses: RefCell::new(alloc_addresses::GlobalStateInner::new(config, stack_addr)),
            // `env_vars` depends on a full interpreter so we cannot properly initialize it yet.
            env_vars: EnvVars::default(),
            main_fn_ret_place: None,
            argc: None,
            argv: None,
            cmd_line: None,
            tls: TlsData::default(),
            isolated_op: config.isolated_op,
            validation: config.validation,
            fds: shims::FdTable::init(config.mute_stdout_stderr),
            epoll_interests: shims::EpollInterestTable::new(),
            dirs: Default::default(),
            layouts,
            threads,
            thread_cpu_affinity,
            sync: SynchronizationObjects::default(),
            static_roots: Vec::new(),
            profiler,
            string_cache: Default::default(),
            exported_symbols_cache: FxHashMap::default(),
            backtrace_style: config.backtrace_style,
            local_crates,
            extern_statics: FxHashMap::default(),
            rng: RefCell::new(rng),
            tracked_alloc_ids: config.tracked_alloc_ids.clone(),
            track_alloc_accesses: config.track_alloc_accesses,
            check_alignment: config.check_alignment,
            cmpxchg_weak_failure_rate: config.cmpxchg_weak_failure_rate,
            mute_stdout_stderr: config.mute_stdout_stderr,
            weak_memory: config.weak_memory_emulation,
            preemption_rate: config.preemption_rate,
            report_progress: config.report_progress,
            basic_block_count: 0,
            clock: Clock::new(config.isolated_op == IsolatedOp::Allow),
            #[cfg(unix)]
            native_lib: config.native_lib.as_ref().map(|lib_file_path| {
                let host_triple = rustc_session::config::host_tuple();
                let target_triple = tcx.sess.opts.target_triple.tuple();
                // Check if host target == the session target.
                if host_triple != target_triple {
                    panic!(
                        "calling external C functions in linked .so file requires host and target to be the same: host={}, target={}",
                        host_triple,
                        target_triple,
                    );
                }
                // Note: it is the user's responsibility to provide a correct SO file.
                // WATCH OUT: If an invalid/incorrect SO file is specified, this can cause
                // undefined behaviour in Miri itself!
                (
                    unsafe {
                        libloading::Library::new(lib_file_path)
                            .expect("failed to read specified extern shared object file")
                    },
                    lib_file_path.clone(),
                )
            }),
            #[cfg(not(unix))]
            native_lib: config.native_lib.as_ref().map(|_| {
                panic!("calling functions from native libraries via FFI is only supported on Unix")
            }),
            gc_interval: config.gc_interval,
            since_gc: 0,
            num_cpus: config.num_cpus,
            page_size,
            stack_addr,
            stack_size,
            collect_leak_backtraces: config.collect_leak_backtraces,
            allocation_spans: RefCell::new(FxHashMap::default()),
            const_cache: RefCell::new(FxHashMap::default()),
            symbolic_alignment: RefCell::new(FxHashMap::default()),
            union_data_ranges: FxHashMap::default(),
            pthread_mutex_sanity: Cell::new(false),
            pthread_rwlock_sanity: Cell::new(false),
            pthread_condvar_sanity: Cell::new(false),
            sb_extern_type_warned: Cell::new(false),
            #[cfg(unix)]
            native_call_mem_warned: Cell::new(false),
            reject_in_isolation_warned: Default::default(),
            int2ptr_warned: Default::default(),
            mangle_internal_symbol_cache: Default::default(),
        }
    }

    pub(crate) fn late_init(
        ecx: &mut MiriInterpCx<'tcx>,
        config: &MiriConfig,
        on_main_stack_empty: StackEmptyCallback<'tcx>,
    ) -> InterpResult<'tcx> {
        EnvVars::init(ecx, config)?;
        MiriMachine::init_extern_statics(ecx)?;
        ThreadManager::init(ecx, on_main_stack_empty);
        interp_ok(())
    }

    pub(crate) fn add_extern_static(ecx: &mut MiriInterpCx<'tcx>, name: &str, ptr: Pointer) {
        // This just got allocated, so there definitely is a pointer here.
        let ptr = ptr.into_pointer_or_addr().unwrap();
        ecx.machine.extern_statics.try_insert(Symbol::intern(name), ptr).unwrap();
    }

    pub(crate) fn communicate(&self) -> bool {
        self.isolated_op == IsolatedOp::Allow
    }

    /// Check whether the stack frame that this `FrameInfo` refers to is part of a local crate.
    pub(crate) fn is_local(&self, frame: &FrameInfo<'_>) -> bool {
        let def_id = frame.instance.def_id();
        def_id.is_local() || self.local_crates.contains(&def_id.krate)
    }

    /// Called when the interpreter is going to shut down abnormally, such as due to a Ctrl-C.
    pub(crate) fn handle_abnormal_termination(&mut self) {
        // All strings in the profile data are stored in a single string table which is not
        // written to disk until the profiler is dropped. If the interpreter exits without dropping
        // the profiler, it is not possible to interpret the profile data and all measureme tools
        // will panic when given the file.
        drop(self.profiler.take());
    }

    pub(crate) fn page_align(&self) -> Align {
        Align::from_bytes(self.page_size).unwrap()
    }

    pub(crate) fn allocated_span(&self, alloc_id: AllocId) -> Option<SpanData> {
        self.allocation_spans
            .borrow()
            .get(&alloc_id)
            .map(|(allocated, _deallocated)| allocated.data())
    }

    pub(crate) fn deallocated_span(&self, alloc_id: AllocId) -> Option<SpanData> {
        self.allocation_spans
            .borrow()
            .get(&alloc_id)
            .and_then(|(_allocated, deallocated)| *deallocated)
            .map(Span::data)
    }

    fn init_allocation(
        ecx: &MiriInterpCx<'tcx>,
        id: AllocId,
        kind: MemoryKind,
        size: Size,
        align: Align,
    ) -> InterpResult<'tcx, AllocExtra<'tcx>> {
        if ecx.machine.tracked_alloc_ids.contains(&id) {
            ecx.emit_diagnostic(NonHaltingDiagnostic::CreatedAlloc(id, size, align, kind));
        }

        let borrow_tracker = ecx
            .machine
            .borrow_tracker
            .as_ref()
            .map(|bt| bt.borrow_mut().new_allocation(id, size, kind, &ecx.machine));

        let data_race = ecx.machine.data_race.as_ref().map(|data_race| {
            data_race::AllocState::new_allocation(
                data_race,
                &ecx.machine.threads,
                size,
                kind,
                ecx.machine.current_span(),
            )
        });
        let weak_memory = ecx.machine.weak_memory.then(weak_memory::AllocState::new_allocation);

        // If an allocation is leaked, we want to report a backtrace to indicate where it was
        // allocated. We don't need to record a backtrace for allocations which are allowed to
        // leak.
        let backtrace = if kind.may_leak() || !ecx.machine.collect_leak_backtraces {
            None
        } else {
            Some(ecx.generate_stacktrace())
        };

        if matches!(kind, MemoryKind::Machine(kind) if kind.should_save_allocation_span()) {
            ecx.machine
                .allocation_spans
                .borrow_mut()
                .insert(id, (ecx.machine.current_span(), None));
        }

        interp_ok(AllocExtra {
            borrow_tracker,
            data_race,
            weak_memory,
            backtrace,
            sync: FxHashMap::default(),
        })
    }
}

impl VisitProvenance for MiriMachine<'_> {
    fn visit_provenance(&self, visit: &mut VisitWith<'_>) {
        #[rustfmt::skip]
        let MiriMachine {
            threads,
            thread_cpu_affinity: _,
            sync: _,
            tls,
            env_vars,
            main_fn_ret_place,
            argc,
            argv,
            cmd_line,
            extern_statics,
            dirs,
            borrow_tracker,
            data_race,
            alloc_addresses,
            fds,
            epoll_interests: _,
            tcx: _,
            isolated_op: _,
            validation: _,
            clock: _,
            layouts: _,
            static_roots: _,
            profiler: _,
            string_cache: _,
            exported_symbols_cache: _,
            backtrace_style: _,
            local_crates: _,
            rng: _,
            tracked_alloc_ids: _,
            track_alloc_accesses: _,
            check_alignment: _,
            cmpxchg_weak_failure_rate: _,
            mute_stdout_stderr: _,
            weak_memory: _,
            preemption_rate: _,
            report_progress: _,
            basic_block_count: _,
            native_lib: _,
            gc_interval: _,
            since_gc: _,
            num_cpus: _,
            page_size: _,
            stack_addr: _,
            stack_size: _,
            collect_leak_backtraces: _,
            allocation_spans: _,
            const_cache: _,
            symbolic_alignment: _,
            union_data_ranges: _,
            pthread_mutex_sanity: _,
            pthread_rwlock_sanity: _,
            pthread_condvar_sanity: _,
            sb_extern_type_warned: _,
            #[cfg(unix)]
            native_call_mem_warned: _,
            reject_in_isolation_warned: _,
            int2ptr_warned: _,
            mangle_internal_symbol_cache: _,
        } = self;

        threads.visit_provenance(visit);
        tls.visit_provenance(visit);
        env_vars.visit_provenance(visit);
        dirs.visit_provenance(visit);
        fds.visit_provenance(visit);
        data_race.visit_provenance(visit);
        borrow_tracker.visit_provenance(visit);
        alloc_addresses.visit_provenance(visit);
        main_fn_ret_place.visit_provenance(visit);
        argc.visit_provenance(visit);
        argv.visit_provenance(visit);
        cmd_line.visit_provenance(visit);
        for ptr in extern_statics.values() {
            ptr.visit_provenance(visit);
        }
    }
}

/// A rustc InterpCx for Miri.
pub type MiriInterpCx<'tcx> = InterpCx<'tcx, MiriMachine<'tcx>>;

/// A little trait that's useful to be inherited by extension traits.
pub trait MiriInterpCxExt<'tcx> {
    fn eval_context_ref<'a>(&'a self) -> &'a MiriInterpCx<'tcx>;
    fn eval_context_mut<'a>(&'a mut self) -> &'a mut MiriInterpCx<'tcx>;
}
impl<'tcx> MiriInterpCxExt<'tcx> for MiriInterpCx<'tcx> {
    #[inline(always)]
    fn eval_context_ref(&self) -> &MiriInterpCx<'tcx> {
        self
    }
    #[inline(always)]
    fn eval_context_mut(&mut self) -> &mut MiriInterpCx<'tcx> {
        self
    }
}

/// Machine hook implementations.
impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
    type MemoryKind = MiriMemoryKind;
    type ExtraFnVal = DynSym;

    type FrameExtra = FrameExtra<'tcx>;
    type AllocExtra = AllocExtra<'tcx>;

    type Provenance = Provenance;
    type ProvenanceExtra = ProvenanceExtra;
    type Bytes = MiriAllocBytes;

    type MemoryMap =
        MonoHashMap<AllocId, (MemoryKind, Allocation<Provenance, Self::AllocExtra, Self::Bytes>)>;

    const GLOBAL_KIND: Option<MiriMemoryKind> = Some(MiriMemoryKind::Global);

    const PANIC_ON_ALLOC_FAIL: bool = false;

    #[inline(always)]
    fn enforce_alignment(ecx: &MiriInterpCx<'tcx>) -> bool {
        ecx.machine.check_alignment != AlignmentCheck::None
    }

    #[inline(always)]
    fn alignment_check(
        ecx: &MiriInterpCx<'tcx>,
        alloc_id: AllocId,
        alloc_align: Align,
        alloc_kind: AllocKind,
        offset: Size,
        align: Align,
    ) -> Option<Misalignment> {
        if ecx.machine.check_alignment != AlignmentCheck::Symbolic {
            // Just use the built-in check.
            return None;
        }
        if alloc_kind != AllocKind::LiveData {
            // Can't have any extra info here.
            return None;
        }
        // Let's see which alignment we have been promised for this allocation.
        let (promised_offset, promised_align) = ecx
            .machine
            .symbolic_alignment
            .borrow()
            .get(&alloc_id)
            .copied()
            .unwrap_or((Size::ZERO, alloc_align));
        if promised_align < align {
            // Definitely not enough.
            Some(Misalignment { has: promised_align, required: align })
        } else {
            // What's the offset between us and the promised alignment?
            let distance = offset.bytes().wrapping_sub(promised_offset.bytes());
            // That must also be aligned.
            if distance % align.bytes() == 0 {
                // All looking good!
                None
            } else {
                // The biggest power of two through which `distance` is divisible.
                let distance_pow2 = 1 << distance.trailing_zeros();
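                // E.g. `distance == 24` (binary 11000) has three trailing zeros, so
                // `distance_pow2 == 8`: relative to the promise, this access is only 8-aligned.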
                Some(Misalignment {
                    has: Align::from_bytes(distance_pow2).unwrap(),
                    required: align,
                })
            }
        }
    }

    #[inline(always)]
    fn enforce_validity(ecx: &MiriInterpCx<'tcx>, _layout: TyAndLayout<'tcx>) -> bool {
        ecx.machine.validation != ValidationMode::No
    }
    #[inline(always)]
    fn enforce_validity_recursively(
        ecx: &InterpCx<'tcx, Self>,
        _layout: TyAndLayout<'tcx>,
    ) -> bool {
        ecx.machine.validation == ValidationMode::Deep
    }

    #[inline(always)]
    fn ignore_optional_overflow_checks(ecx: &MiriInterpCx<'tcx>) -> bool {
        !ecx.tcx.sess.overflow_checks()
    }

    fn check_fn_target_features(
        ecx: &MiriInterpCx<'tcx>,
        instance: ty::Instance<'tcx>,
    ) -> InterpResult<'tcx> {
        let attrs = ecx.tcx.codegen_fn_attrs(instance.def_id());
        if attrs
            .target_features
            .iter()
            .any(|feature| !ecx.tcx.sess.target_features.contains(&feature.name))
        {
            let unavailable = attrs
                .target_features
                .iter()
                .filter(|&feature| {
                    !feature.implied && !ecx.tcx.sess.target_features.contains(&feature.name)
                })
                .fold(String::new(), |mut s, feature| {
                    if !s.is_empty() {
                        s.push_str(", ");
                    }
                    s.push_str(feature.name.as_str());
                    s
                });
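            // `unavailable` is now a comma-separated list of the missing feature names, e.g.
            // `avx2, fma` (names here are illustrative; they depend on the function's
            // `#[target_feature]` attributes).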
            let msg = format!(
                "calling a function that requires unavailable target features: {unavailable}"
            );
            // On WASM, this is not UB, but instead gets rejected during validation of the module
            // (see #84988).
            if ecx.tcx.sess.target.is_like_wasm {
                throw_machine_stop!(TerminationInfo::Abort(msg));
            } else {
                throw_ub_format!("{msg}");
            }
        }
        interp_ok(())
    }

    #[inline(always)]
    fn find_mir_or_eval_fn(
        ecx: &mut MiriInterpCx<'tcx>,
        instance: ty::Instance<'tcx>,
        abi: &FnAbi<'tcx, Ty<'tcx>>,
        args: &[FnArg<'tcx, Provenance>],
        dest: &MPlaceTy<'tcx>,
        ret: Option<mir::BasicBlock>,
        unwind: mir::UnwindAction,
    ) -> InterpResult<'tcx, Option<(&'tcx mir::Body<'tcx>, ty::Instance<'tcx>)>> {
        // For foreign items, try to see if we can emulate them.
        if ecx.tcx.is_foreign_item(instance.def_id()) {
            // An external function call that does not have a MIR body. We either find MIR elsewhere
            // or emulate its effect.
            // This will be Ok(None) if we're emulating the function entirely within Miri (no need
            // to run extra MIR), and Ok(Some(body)) if we found MIR to run for the foreign
            // function.
            // Any needed call to `goto_block` will be performed by `emulate_foreign_item`.
            let args = ecx.copy_fn_args(args); // FIXME: Should `InPlace` arguments be reset to uninit?
            let link_name = Symbol::intern(ecx.tcx.symbol_name(instance).name);
            return ecx.emulate_foreign_item(link_name, abi, &args, dest, ret, unwind);
        }

        // Otherwise, load the MIR.
        interp_ok(Some((ecx.load_mir(instance.def, None)?, instance)))
    }

    #[inline(always)]
    fn call_extra_fn(
        ecx: &mut MiriInterpCx<'tcx>,
        fn_val: DynSym,
        abi: &FnAbi<'tcx, Ty<'tcx>>,
        args: &[FnArg<'tcx, Provenance>],
        dest: &MPlaceTy<'tcx>,
        ret: Option<mir::BasicBlock>,
        unwind: mir::UnwindAction,
    ) -> InterpResult<'tcx> {
        let args = ecx.copy_fn_args(args); // FIXME: Should `InPlace` arguments be reset to uninit?
        ecx.emulate_dyn_sym(fn_val, abi, &args, dest, ret, unwind)
    }

    #[inline(always)]
    fn call_intrinsic(
        ecx: &mut MiriInterpCx<'tcx>,
        instance: ty::Instance<'tcx>,
        args: &[OpTy<'tcx>],
        dest: &MPlaceTy<'tcx>,
        ret: Option<mir::BasicBlock>,
        unwind: mir::UnwindAction,
    ) -> InterpResult<'tcx, Option<ty::Instance<'tcx>>> {
        ecx.call_intrinsic(instance, args, dest, ret, unwind)
    }

    #[inline(always)]
    fn assert_panic(
        ecx: &mut MiriInterpCx<'tcx>,
        msg: &mir::AssertMessage<'tcx>,
        unwind: mir::UnwindAction,
    ) -> InterpResult<'tcx> {
        ecx.assert_panic(msg, unwind)
    }

    fn panic_nounwind(ecx: &mut InterpCx<'tcx, Self>, msg: &str) -> InterpResult<'tcx> {
        ecx.start_panic_nounwind(msg)
    }

    fn unwind_terminate(
        ecx: &mut InterpCx<'tcx, Self>,
        reason: mir::UnwindTerminateReason,
    ) -> InterpResult<'tcx> {
        // Call the lang item.
        let panic = ecx.tcx.lang_items().get(reason.lang_item()).unwrap();
        let panic = ty::Instance::mono(ecx.tcx.tcx, panic);
        ecx.call_function(
            panic,
            ExternAbi::Rust,
            &[],
            None,
            StackPopCleanup::Goto { ret: None, unwind: mir::UnwindAction::Unreachable },
        )?;
        interp_ok(())
    }

    #[inline(always)]
    fn binary_ptr_op(
        ecx: &MiriInterpCx<'tcx>,
        bin_op: mir::BinOp,
        left: &ImmTy<'tcx>,
        right: &ImmTy<'tcx>,
    ) -> InterpResult<'tcx, ImmTy<'tcx>> {
        ecx.binary_ptr_op(bin_op, left, right)
    }

    #[inline(always)]
    fn generate_nan<F1: Float + FloatConvert<F2>, F2: Float>(
        ecx: &InterpCx<'tcx, Self>,
        inputs: &[F1],
    ) -> F2 {
        ecx.generate_nan(inputs)
    }

    #[inline(always)]
    fn equal_float_min_max<F: Float>(ecx: &MiriInterpCx<'tcx>, a: F, b: F) -> F {
        ecx.equal_float_min_max(a, b)
    }

    #[inline(always)]
    fn ub_checks(ecx: &InterpCx<'tcx, Self>) -> InterpResult<'tcx, bool> {
        interp_ok(ecx.tcx.sess.ub_checks())
    }

    #[inline(always)]
    fn contract_checks(ecx: &InterpCx<'tcx, Self>) -> InterpResult<'tcx, bool> {
        interp_ok(ecx.tcx.sess.contract_checks())
    }

    #[inline(always)]
    fn thread_local_static_pointer(
        ecx: &mut MiriInterpCx<'tcx>,
        def_id: DefId,
    ) -> InterpResult<'tcx, StrictPointer> {
        ecx.get_or_create_thread_local_alloc(def_id)
    }

    fn extern_static_pointer(
        ecx: &MiriInterpCx<'tcx>,
        def_id: DefId,
    ) -> InterpResult<'tcx, StrictPointer> {
        let link_name = Symbol::intern(ecx.tcx.symbol_name(Instance::mono(*ecx.tcx, def_id)).name);
        if let Some(&ptr) = ecx.machine.extern_statics.get(&link_name) {
            // Various parts of the engine rely on `get_alloc_info` for size and alignment
            // information. That uses the type information of this static.
            // Make sure it matches the Miri allocation for this.
            let Provenance::Concrete { alloc_id, .. } = ptr.provenance else {
                panic!("extern_statics cannot contain wildcards")
            };
            let info = ecx.get_alloc_info(alloc_id);
            let def_ty = ecx.tcx.type_of(def_id).instantiate_identity();
            let extern_decl_layout =
                ecx.tcx.layout_of(ecx.typing_env().as_query_input(def_ty)).unwrap();
            if extern_decl_layout.size != info.size || extern_decl_layout.align.abi != info.align {
                throw_unsup_format!(
                    "extern static `{link_name}` has been declared as `{krate}::{name}` \
                    with a size of {decl_size} bytes and alignment of {decl_align} bytes, \
                    but Miri emulates it via an extern static shim \
                    with a size of {shim_size} bytes and alignment of {shim_align} bytes",
                    name = ecx.tcx.def_path_str(def_id),
                    krate = ecx.tcx.crate_name(def_id.krate),
                    decl_size = extern_decl_layout.size.bytes(),
                    decl_align = extern_decl_layout.align.abi.bytes(),
                    shim_size = info.size.bytes(),
                    shim_align = info.align.bytes(),
                )
            }
            interp_ok(ptr)
        } else {
            throw_unsup_format!("extern static `{link_name}` is not supported by Miri")
        }
    }

    fn init_local_allocation(
        ecx: &MiriInterpCx<'tcx>,
        id: AllocId,
        kind: MemoryKind,
        size: Size,
        align: Align,
    ) -> InterpResult<'tcx, Self::AllocExtra> {
        assert!(kind != MiriMemoryKind::Global.into());
        MiriMachine::init_allocation(ecx, id, kind, size, align)
    }

    fn adjust_alloc_root_pointer(
        ecx: &MiriInterpCx<'tcx>,
        ptr: interpret::Pointer<CtfeProvenance>,
        kind: Option<MemoryKind>,
    ) -> InterpResult<'tcx, interpret::Pointer<Provenance>> {
        let kind = kind.expect("we set our GLOBAL_KIND so this cannot be None");
        let alloc_id = ptr.provenance.alloc_id();
        if cfg!(debug_assertions) {
            // The machine promises to never call us on thread-local or extern statics.
            match ecx.tcx.try_get_global_alloc(alloc_id) {
                Some(GlobalAlloc::Static(def_id)) if ecx.tcx.is_thread_local_static(def_id) => {
                    panic!("adjust_alloc_root_pointer called on thread-local static")
                }
                Some(GlobalAlloc::Static(def_id)) if ecx.tcx.is_foreign_item(def_id) => {
                    panic!("adjust_alloc_root_pointer called on extern static")
                }
                _ => {}
            }
        }
        // FIXME: can we somehow preserve the immutability of `ptr`?
        let tag = if let Some(borrow_tracker) = &ecx.machine.borrow_tracker {
            borrow_tracker.borrow_mut().root_ptr_tag(alloc_id, &ecx.machine)
        } else {
            // Value does not matter, SB is disabled
            BorTag::default()
        };
        ecx.adjust_alloc_root_pointer(ptr, tag, kind)
    }

    /// Called on `usize as ptr` casts.
    #[inline(always)]
    fn ptr_from_addr_cast(ecx: &MiriInterpCx<'tcx>, addr: u64) -> InterpResult<'tcx, Pointer> {
        ecx.ptr_from_addr_cast(addr)
    }

    /// Called on `ptr as usize` casts.
    /// (Actually computing the resulting `usize` doesn't need machine help,
    /// that's just `Scalar::try_to_int`.)
    #[inline(always)]
    fn expose_provenance(
        ecx: &InterpCx<'tcx, Self>,
        provenance: Self::Provenance,
    ) -> InterpResult<'tcx> {
        ecx.expose_provenance(provenance)
    }

    /// Convert a pointer with provenance into an allocation-offset pair and extra provenance info.
    /// `size` says how many bytes of memory are expected at that pointer. The *sign* of `size` can
    /// be used to disambiguate situations where a wildcard pointer sits right in between two
    /// allocations.
    ///
    /// If `ptr.provenance.get_alloc_id()` is `Some(p)`, the returned `AllocId` must be `p`.
    /// The resulting `AllocId` will just be used for that one step and then forgotten again
    /// (i.e., we'll never turn the data returned here back into a `Pointer` that might be
    /// stored in machine state).
    ///
    /// When this fails, that means the pointer does not point to a live allocation.
    fn ptr_get_alloc(
        ecx: &MiriInterpCx<'tcx>,
        ptr: StrictPointer,
        size: i64,
    ) -> Option<(AllocId, Size, Self::ProvenanceExtra)> {
        let rel = ecx.ptr_get_alloc(ptr, size);

        rel.map(|(alloc_id, size)| {
            let tag = match ptr.provenance {
                Provenance::Concrete { tag, .. } => ProvenanceExtra::Concrete(tag),
                Provenance::Wildcard => ProvenanceExtra::Wildcard,
            };
            (alloc_id, size, tag)
        })
    }
1344
1345    /// Called to adjust global allocations to the Provenance and AllocExtra of this machine.
1346    ///
1347    /// If `alloc` contains pointers, then they are all pointing to globals.
1348    ///
1349    /// This should avoid copying if no work has to be done! If this returns an owned
1350    /// allocation (because a copy had to be done to adjust things), machine memory will
1351    /// cache the result. (This relies on `AllocMap::get_or` being able to add the
1352    /// owned allocation to the map even when the map is shared.)
1353    fn adjust_global_allocation<'b>(
1354        ecx: &InterpCx<'tcx, Self>,
1355        id: AllocId,
1356        alloc: &'b Allocation,
1357    ) -> InterpResult<'tcx, Cow<'b, Allocation<Self::Provenance, Self::AllocExtra, Self::Bytes>>>
1358    {
1359        let alloc = alloc.adjust_from_tcx(
1360            &ecx.tcx,
1361            |bytes, align| ecx.get_global_alloc_bytes(id, bytes, align),
1362            |ptr| ecx.global_root_pointer(ptr),
1363        )?;
1364        let kind = MiriMemoryKind::Global.into();
1365        let extra = MiriMachine::init_allocation(ecx, id, kind, alloc.size(), alloc.align)?;
1366        interp_ok(Cow::Owned(alloc.with_extra(extra)))
1367    }
1368
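    /// Called before every memory read: emits a diagnostic if this allocation is being tracked,
    /// then forwards the access to the data race detector, the borrow tracker, and the weak
    /// memory emulation (whichever of them are enabled).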
    #[inline(always)]
    fn before_memory_read(
        _tcx: TyCtxtAt<'tcx>,
        machine: &Self,
        alloc_extra: &AllocExtra<'tcx>,
        _ptr: Pointer,
        (alloc_id, prov_extra): (AllocId, Self::ProvenanceExtra),
        range: AllocRange,
    ) -> InterpResult<'tcx> {
        if machine.track_alloc_accesses && machine.tracked_alloc_ids.contains(&alloc_id) {
            machine
                .emit_diagnostic(NonHaltingDiagnostic::AccessedAlloc(alloc_id, AccessKind::Read));
        }
        if let Some(data_race) = &alloc_extra.data_race {
            data_race.read(alloc_id, range, NaReadType::Read, None, machine)?;
        }
        if let Some(borrow_tracker) = &alloc_extra.borrow_tracker {
            borrow_tracker.before_memory_read(alloc_id, prov_extra, range, machine)?;
        }
        if let Some(weak_memory) = &alloc_extra.weak_memory {
            weak_memory.memory_accessed(range, machine.data_race.as_ref().unwrap());
        }
        interp_ok(())
    }

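    /// Called before every memory write; the write-access counterpart of `before_memory_read`.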
    #[inline(always)]
    fn before_memory_write(
        _tcx: TyCtxtAt<'tcx>,
        machine: &mut Self,
        alloc_extra: &mut AllocExtra<'tcx>,
        _ptr: Pointer,
        (alloc_id, prov_extra): (AllocId, Self::ProvenanceExtra),
        range: AllocRange,
    ) -> InterpResult<'tcx> {
        if machine.track_alloc_accesses && machine.tracked_alloc_ids.contains(&alloc_id) {
            machine
                .emit_diagnostic(NonHaltingDiagnostic::AccessedAlloc(alloc_id, AccessKind::Write));
        }
        if let Some(data_race) = &mut alloc_extra.data_race {
            data_race.write(alloc_id, range, NaWriteType::Write, None, machine)?;
        }
        if let Some(borrow_tracker) = &mut alloc_extra.borrow_tracker {
            borrow_tracker.before_memory_write(alloc_id, prov_extra, range, machine)?;
        }
        if let Some(weak_memory) = &alloc_extra.weak_memory {
            weak_memory.memory_accessed(range, machine.data_race.as_ref().unwrap());
        }
        interp_ok(())
    }

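    /// Called before an allocation is deallocated: emits a diagnostic for tracked allocations,
    /// models the deallocation as a write for the data race detector, notifies the borrow
    /// tracker, records the deallocation span, and finally frees the allocation's address.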
    #[inline(always)]
    fn before_memory_deallocation(
        _tcx: TyCtxtAt<'tcx>,
        machine: &mut Self,
        alloc_extra: &mut AllocExtra<'tcx>,
        _ptr: Pointer,
        (alloc_id, prov_extra): (AllocId, Self::ProvenanceExtra),
        size: Size,
        align: Align,
        kind: MemoryKind,
    ) -> InterpResult<'tcx> {
        if machine.tracked_alloc_ids.contains(&alloc_id) {
            machine.emit_diagnostic(NonHaltingDiagnostic::FreedAlloc(alloc_id));
        }
        if let Some(data_race) = &mut alloc_extra.data_race {
            data_race.write(
                alloc_id,
                alloc_range(Size::ZERO, size),
                NaWriteType::Deallocate,
                None,
                machine,
            )?;
        }
        if let Some(borrow_tracker) = &mut alloc_extra.borrow_tracker {
            borrow_tracker.before_memory_deallocation(alloc_id, prov_extra, size, machine)?;
        }
        if let Some((_, deallocated_at)) = machine.allocation_spans.borrow_mut().get_mut(&alloc_id)
        {
            *deallocated_at = Some(machine.current_span());
        }
        machine.free_alloc_id(alloc_id, size, align, kind);
        interp_ok(())
    }

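    /// Retag a pointer-typed value when the borrow tracker is enabled; otherwise this is a no-op.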
    #[inline(always)]
    fn retag_ptr_value(
        ecx: &mut InterpCx<'tcx, Self>,
        kind: mir::RetagKind,
        val: &ImmTy<'tcx>,
    ) -> InterpResult<'tcx, ImmTy<'tcx>> {
        if ecx.machine.borrow_tracker.is_some() {
            ecx.retag_ptr_value(kind, val)
        } else {
            interp_ok(val.clone())
        }
    }

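    /// Retag the pointers contained in this place when the borrow tracker is enabled;
    /// otherwise this is a no-op.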
    #[inline(always)]
    fn retag_place_contents(
        ecx: &mut InterpCx<'tcx, Self>,
        kind: mir::RetagKind,
        place: &PlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        if ecx.machine.borrow_tracker.is_some() {
            ecx.retag_place_contents(kind, place)?;
        }
        interp_ok(())
    }

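    /// Mark an in-place function argument (or in-place return place) as protected for the
    /// duration of the call, and reset its contents so the caller can no longer observe them.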
    fn protect_in_place_function_argument(
        ecx: &mut InterpCx<'tcx, Self>,
        place: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        // If we have a borrow tracker, we also have it set up protection so that all reads *and
        // writes* during this call are insta-UB.
        let protected_place = if ecx.machine.borrow_tracker.is_some() {
            ecx.protect_place(place)?
        } else {
            // No borrow tracker.
            place.clone()
        };
        // We do need to write `uninit` so that even after the call ends, the former contents of
        // this place cannot be observed any more. We do the write after retagging so that for
        // Tree Borrows, this is considered to activate the new tag.
        // Conveniently this also ensures that the place actually points to suitable memory.
        ecx.write_uninit(&protected_place)?;
        // Now we throw away the protected place, ensuring its tag is never used again.
        interp_ok(())
    }

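    /// Set up the Miri-specific `FrameExtra` for a freshly pushed stack frame: `measureme`
    /// profiling data, borrow tracker frame state, the per-frame salt for const addresses,
    /// and data race detector state.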
    #[inline(always)]
    fn init_frame(
        ecx: &mut InterpCx<'tcx, Self>,
        frame: Frame<'tcx, Provenance>,
    ) -> InterpResult<'tcx, Frame<'tcx, Provenance, FrameExtra<'tcx>>> {
        // Start recording our event before doing anything else.
        let timing = if let Some(profiler) = ecx.machine.profiler.as_ref() {
            let fn_name = frame.instance().to_string();
            let entry = ecx.machine.string_cache.entry(fn_name.clone());
            let name = entry.or_insert_with(|| profiler.alloc_string(&*fn_name));

            Some(profiler.start_recording_interval_event_detached(
                *name,
                measureme::EventId::from_label(*name),
                ecx.active_thread().to_u32(),
            ))
        } else {
            None
        };

        let borrow_tracker = ecx.machine.borrow_tracker.as_ref();

        let extra = FrameExtra {
            borrow_tracker: borrow_tracker.map(|bt| bt.borrow_mut().new_frame()),
            catch_unwind: None,
            timing,
            is_user_relevant: ecx.machine.is_user_relevant(&frame),
            salt: ecx.machine.rng.borrow_mut().random_range(0..ADDRS_PER_ANON_GLOBAL),
            data_race: ecx.machine.data_race.as_ref().map(|_| data_race::FrameState::default()),
        };

        interp_ok(frame.with_extra(extra))
    }

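    /// Miri is multi-threaded: the interpreter operates on the stack of the active thread.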
    fn stack<'a>(
        ecx: &'a InterpCx<'tcx, Self>,
    ) -> &'a [Frame<'tcx, Self::Provenance, Self::FrameExtra>] {
        ecx.active_thread_stack()
    }

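    /// Mutable counterpart of `stack`, again for the active thread.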
    fn stack_mut<'a>(
        ecx: &'a mut InterpCx<'tcx, Self>,
    ) -> &'a mut Vec<Frame<'tcx, Self::Provenance, Self::FrameExtra>> {
        ecx.active_thread_stack_mut()
    }

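    /// Called before each terminator: counts basic blocks (for progress reports), runs the
    /// provenance GC when its interval is reached, inserts a preemption point, and advances
    /// the monotonic clock.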
    fn before_terminator(ecx: &mut InterpCx<'tcx, Self>) -> InterpResult<'tcx> {
        ecx.machine.basic_block_count += 1u64; // a u64 that is only incremented by 1 will "never" overflow
        ecx.machine.since_gc += 1;
        // Possibly report our progress. This will point at the terminator we are about to execute.
        if let Some(report_progress) = ecx.machine.report_progress {
            if ecx.machine.basic_block_count % u64::from(report_progress) == 0 {
                ecx.emit_diagnostic(NonHaltingDiagnostic::ProgressReport {
                    block_count: ecx.machine.basic_block_count,
                });
            }
        }

        // Search for BorTags to find all live pointers, then remove all other tags from borrow
        // stacks.
        // When debug assertions are enabled, run the GC as often as possible so that any cases
        // where it mistakenly removes an important tag become visible.
        if ecx.machine.gc_interval > 0 && ecx.machine.since_gc >= ecx.machine.gc_interval {
            ecx.machine.since_gc = 0;
            ecx.run_provenance_gc();
        }

        // These are our preemption points.
        // (This will only take effect after the terminator has been executed.)
        ecx.maybe_preempt_active_thread();

        // Make sure some time passes.
        ecx.machine.clock.tick();

        interp_ok(())
    }

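    /// Called right after a frame was pushed: keeps the cached index of the topmost
    /// user-relevant frame up to date.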
    #[inline(always)]
    fn after_stack_push(ecx: &mut InterpCx<'tcx, Self>) -> InterpResult<'tcx> {
        if ecx.frame().extra.is_user_relevant {
            // We just pushed a user-relevant frame, so it must now be the topmost user-relevant
            // frame. If we push a frame that is not user-relevant, there is nothing to update.
            let stack_len = ecx.active_thread_stack().len();
            ecx.active_thread_mut().set_top_user_relevant_frame(stack_len - 1);
        }
        interp_ok(())
    }

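    /// Called just before a frame is popped, while the return value is still in place: gives the
    /// borrow tracker a chance to release this call's protectors, and logs which function we are
    /// leaving.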
    fn before_stack_pop(
        ecx: &InterpCx<'tcx, Self>,
        frame: &Frame<'tcx, Self::Provenance, Self::FrameExtra>,
    ) -> InterpResult<'tcx> {
        // We want this *before* the return value copy, because the return place itself is protected
        // until we do `end_call` here.
        if ecx.machine.borrow_tracker.is_some() {
            ecx.on_stack_pop(frame)?;
        }
        // tracing-tree can automatically annotate scope changes, but it gets very confused by our
        // concurrency and what it prints is just plain wrong. So we print our own information
        // instead. (Cc https://github.com/rust-lang/miri/issues/2266)
        info!("Leaving {}", ecx.frame().instance());
        interp_ok(())
    }

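    /// Called after a frame was popped: recomputes the topmost user-relevant frame if needed,
    /// finishes the frame's `measureme` timing event, and handles unwinding.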
    #[inline(always)]
    fn after_stack_pop(
        ecx: &mut InterpCx<'tcx, Self>,
        frame: Frame<'tcx, Provenance, FrameExtra<'tcx>>,
        unwinding: bool,
    ) -> InterpResult<'tcx, ReturnAction> {
        if frame.extra.is_user_relevant {
            // All that we store is whether or not the frame we just removed is user-relevant, so
            // now we have no idea where the next topmost user-relevant frame is. So we recompute
            // it. (If this ever becomes a bottleneck, we could have `push` store the previous
            // user-relevant frame and restore that here.)
            ecx.active_thread_mut().recompute_top_user_relevant_frame();
        }
        let res = {
            // Move `frame` into a sub-scope so we control when it will be dropped.
            let mut frame = frame;
            let timing = frame.extra.timing.take();
            let res = ecx.handle_stack_pop_unwind(frame.extra, unwinding);
            if let Some(profiler) = ecx.machine.profiler.as_ref() {
                profiler.finish_recording_interval_event(timing.unwrap());
            }
            res
        };
        // Needs to be done after dropping frame to show up on the right nesting level.
        // (Cc https://github.com/rust-lang/miri/issues/2266)
        if !ecx.active_thread_stack().is_empty() {
            info!("Continuing in {}", ecx.frame().instance());
        }
        res
    }

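    /// Called after reading a local that still lives entirely in the frame (not in memory):
    /// informs the data race detector, which tracks such locals separately from allocations.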
    fn after_local_read(
        ecx: &InterpCx<'tcx, Self>,
        frame: &Frame<'tcx, Provenance, FrameExtra<'tcx>>,
        local: mir::Local,
    ) -> InterpResult<'tcx> {
        if let Some(data_race) = &frame.extra.data_race {
            data_race.local_read(local, &ecx.machine);
        }
        interp_ok(())
    }

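    /// Write counterpart of `after_local_read`; `storage_live` indicates whether this write
    /// comes from the local's storage becoming live.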
    fn after_local_write(
        ecx: &mut InterpCx<'tcx, Self>,
        local: mir::Local,
        storage_live: bool,
    ) -> InterpResult<'tcx> {
        if let Some(data_race) = &ecx.frame().extra.data_race {
            data_race.local_write(local, storage_live, &ecx.machine);
        }
        interp_ok(())
    }

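    /// Called when a local that was tracked per-frame is spilled into a real allocation: records
    /// the local's declaration span for the new allocation and lets the data race detector
    /// initialize the allocation's clocks from the per-frame data.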
    fn after_local_moved_to_memory(
        ecx: &mut InterpCx<'tcx, Self>,
        local: mir::Local,
        mplace: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let Some(Provenance::Concrete { alloc_id, .. }) = mplace.ptr().provenance else {
            panic!("after_local_moved_to_memory should only be called on fresh allocations");
        };
        // Record the span where this was allocated: the declaration of the local.
        let local_decl = &ecx.frame().body().local_decls[local];
        let span = local_decl.source_info.span;
        ecx.machine.allocation_spans.borrow_mut().insert(alloc_id, (span, None));
        // The data race system has to fix the clocks used for this write.
        let (alloc_info, machine) = ecx.get_alloc_extra_mut(alloc_id)?;
        if let Some(data_race) =
            &machine.threads.active_thread_stack().last().unwrap().extra.data_race
        {
            data_race.local_moved_to_memory(local, alloc_info.data_race.as_mut().unwrap(), machine);
        }
        interp_ok(())
    }

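    /// Evaluate a MIR constant, caching the result per frame. The cache key includes the frame's
    /// `salt` so that distinct frames can see distinct addresses for the same constant, while
    /// within one frame a constant keeps a stable address.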
    fn eval_mir_constant<F>(
        ecx: &InterpCx<'tcx, Self>,
        val: mir::Const<'tcx>,
        span: Span,
        layout: Option<TyAndLayout<'tcx>>,
        eval: F,
    ) -> InterpResult<'tcx, OpTy<'tcx>>
    where
        F: Fn(
            &InterpCx<'tcx, Self>,
            mir::Const<'tcx>,
            Span,
            Option<TyAndLayout<'tcx>>,
        ) -> InterpResult<'tcx, OpTy<'tcx>>,
    {
        let frame = ecx.active_thread_stack().last().unwrap();
        let mut cache = ecx.machine.const_cache.borrow_mut();
        match cache.entry((val, frame.extra.salt)) {
            Entry::Vacant(ve) => {
                let op = eval(ecx, val, span, layout)?;
                ve.insert(op.clone());
                interp_ok(op)
            }
            Entry::Occupied(oe) => interp_ok(oe.get().clone()),
        }
    }

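    /// Choose the "salt" that goes into the base-address computation for a global allocation.
    /// Functions that codegen would emit exactly once get the fixed `CTFE_ALLOC_SALT`, giving
    /// them a unique address; everything else gets a random salt below `ADDRS_PER_ANON_GLOBAL`.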
    fn get_global_alloc_salt(
        ecx: &InterpCx<'tcx, Self>,
        instance: Option<ty::Instance<'tcx>>,
    ) -> usize {
        let unique = if let Some(instance) = instance {
            // Functions cannot be identified by pointers, as asm-equal functions can get
            // deduplicated by the linker (we set the "unnamed_addr" attribute for LLVM) and
            // functions can be duplicated across crates. We thus generate a new `AllocId` for every
            // mention of a function. This means that `main as fn() == main as fn()` is false, while
            // `let x = main as fn(); x == x` is true. However, as a quality-of-life feature it can
            // be useful to identify certain functions uniquely, e.g. for backtraces. So we identify
            // whether codegen will actually emit duplicate functions. It does that when they have
            // non-lifetime generics, or when they can be inlined. All other functions are given a
            // unique address. This is not a stable guarantee! The `inline` attribute is a hint and
            // cannot be relied upon for anything. But if we don't do this, the
            // `__rust_begin_short_backtrace`/`__rust_end_short_backtrace` logic breaks and panic
            // backtraces look terrible.
            let is_generic = instance
                .args
                .into_iter()
                .any(|kind| !matches!(kind.unpack(), ty::GenericArgKind::Lifetime(_)));
            let can_be_inlined = matches!(
                ecx.tcx.sess.opts.unstable_opts.cross_crate_inline_threshold,
                InliningThreshold::Always
            ) || !matches!(
                ecx.tcx.codegen_fn_attrs(instance.def_id()).inline,
                InlineAttr::Never
            );
            !is_generic && !can_be_inlined
        } else {
            // Non-functions are never unique.
            false
        };
        // Always use the same salt if the allocation is unique.
        if unique {
            CTFE_ALLOC_SALT
        } else {
            ecx.machine.rng.borrow_mut().random_range(0..ADDRS_PER_ANON_GLOBAL)
        }
    }

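    /// Cache the computed data range of a union type, so the layout walk runs at most once
    /// per union type.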
    fn cached_union_data_range<'e>(
        ecx: &'e mut InterpCx<'tcx, Self>,
        ty: Ty<'tcx>,
        compute_range: impl FnOnce() -> RangeSet,
    ) -> Cow<'e, RangeSet> {
        Cow::Borrowed(ecx.machine.union_data_ranges.entry(ty).or_insert_with(compute_range))
    }
}

/// Trait for callbacks handling asynchronous machine operations.
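/// These are boxed up and stored in the machine (e.g. while a thread is blocked), then invoked
/// once the operation they wait for has completed.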
pub trait MachineCallback<'tcx, T>: VisitProvenance {
    /// The function to be invoked when the callback is fired.
    fn call(
        self: Box<Self>,
        ecx: &mut InterpCx<'tcx, MiriMachine<'tcx>>,
        arg: T,
    ) -> InterpResult<'tcx>;
}

/// Type alias for boxed machine callbacks with generic argument type.
pub type DynMachineCallback<'tcx, T> = Box<dyn MachineCallback<'tcx, T> + 'tcx>;

/// Creates a `DynMachineCallback`:
///
/// ```rust
/// callback!(
///     @capture<'tcx> {
///         var1: Ty1,
///         var2: Ty2<'tcx>,
///     }
///     |this, arg: ArgTy| {
///         // Implement the callback here.
///         todo!()
///     }
/// )
/// ```
///
/// All the captured types must implement `VisitProvenance`.
#[macro_export]
macro_rules! callback {
    (@capture<$tcx:lifetime $(,)? $($lft:lifetime),*>
        { $($name:ident: $type:ty),* $(,)? }
     |$this:ident, $arg:ident: $arg_ty:ty| $body:expr $(,)?) => {{
        struct Callback<$tcx, $($lft),*> {
            $($name: $type,)*
            _phantom: std::marker::PhantomData<&$tcx ()>,
        }

        impl<$tcx, $($lft),*> VisitProvenance for Callback<$tcx, $($lft),*> {
            fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {
                $(
                    self.$name.visit_provenance(_visit);
                )*
            }
        }

        impl<$tcx, $($lft),*> MachineCallback<$tcx, $arg_ty> for Callback<$tcx, $($lft),*> {
            fn call(
                self: Box<Self>,
                $this: &mut MiriInterpCx<$tcx>,
                $arg: $arg_ty
            ) -> InterpResult<$tcx> {
                #[allow(unused_variables)]
                let Callback { $($name,)* _phantom } = *self;
                $body
            }
        }

        Box::new(Callback {
            $($name,)*
            _phantom: std::marker::PhantomData
        })
    }};
}