miri/machine.rs

//! Global machine state as well as implementation of the interpreter engine
//! `Machine` trait.

use std::any::Any;
use std::borrow::Cow;
use std::cell::{Cell, RefCell};
use std::collections::hash_map::Entry;
use std::path::Path;
use std::rc::Rc;
use std::{fmt, process};

use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};
use rustc_abi::{Align, ExternAbi, Size};
use rustc_apfloat::{Float, FloatConvert};
use rustc_attr_data_structures::InlineAttr;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
#[allow(unused)]
use rustc_data_structures::static_assert_size;
use rustc_middle::mir;
use rustc_middle::query::TyCtxtAt;
use rustc_middle::ty::layout::{
    HasTyCtxt, HasTypingEnv, LayoutCx, LayoutError, LayoutOf, TyAndLayout,
};
use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
use rustc_session::config::InliningThreshold;
use rustc_span::def_id::{CrateNum, DefId};
use rustc_span::{Span, SpanData, Symbol};
use rustc_target::callconv::FnAbi;

use crate::alloc_addresses::EvalContextExt;
use crate::concurrency::cpu_affinity::{self, CpuAffinityMask};
use crate::concurrency::data_race::{self, NaReadType, NaWriteType};
use crate::concurrency::{AllocDataRaceHandler, GenmcCtx, GlobalDataRaceHandler, weak_memory};
use crate::*;

/// First real-time signal.
/// `signal(7)` says this must be between 32 and 64 and specifies 34 or 35
/// as typical values.
pub const SIGRTMIN: i32 = 34;

/// Last real-time signal.
/// `signal(7)` says it must be between 32 and 64 and specifies
/// `SIGRTMAX` - `SIGRTMIN` >= 8 (which is the value of `_POSIX_RTSIG_MAX`)
pub const SIGRTMAX: i32 = 42;
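// Note: with the concrete values chosen above, `SIGRTMAX - SIGRTMIN == 8`, which exactly meets
// the `_POSIX_RTSIG_MAX` lower bound quoted in the comment.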

/// Each anonymous global (constant, vtable, function pointer, ...) has multiple addresses, but only
/// this many. Since const allocations are never deallocated, choosing a new [`AllocId`] and thus
/// base address for each evaluation would produce unbounded memory usage.
const ADDRS_PER_ANON_GLOBAL: usize = 32;
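// Illustrative sketch of the problem this cap solves (hypothetical program, not part of this
// module): a program that repeatedly evaluates the same anonymous global, e.g.
//
//     loop {
//         let p = &42 as *const i32; // promoted const, never deallocated
//     }
//
// would otherwise be entitled to a fresh base address on every evaluation, accumulating
// address-assignment state without bound; capping the number of distinct addresses per
// anonymous global keeps that state finite.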

/// Extra data stored with each stack frame
pub struct FrameExtra<'tcx> {
    /// Extra data for the Borrow Tracker.
    pub borrow_tracker: Option<borrow_tracker::FrameState>,

    /// If this is Some(), then this is a special "catch unwind" frame (the frame of `try_fn`
    /// called by `try`). When this frame is popped during unwinding a panic,
    /// we stop unwinding and use the `CatchUnwindData` to handle catching.
    pub catch_unwind: Option<CatchUnwindData<'tcx>>,

    /// If `measureme` profiling is enabled, holds timing information
    /// for the start of this frame. When we finish executing this frame,
    /// we use this to register a completed event with `measureme`.
    pub timing: Option<measureme::DetachedTiming>,

    /// Indicates whether a `Frame` is part of a workspace-local crate and is also not
    /// `#[track_caller]`. We compute this once on creation and store the result, as an
    /// optimization.
    /// This is used by `MiriMachine::current_span` and `MiriMachine::caller_span`.
    pub is_user_relevant: bool,

    /// We have a cache for the mapping from [`mir::Const`] to resulting [`AllocId`].
    /// However, we don't want all frames to always get the same result, so we insert
    /// an additional bit of "salt" into the cache key. This salt is fixed per-frame
    /// so that within a call, a const will have a stable address.
    salt: usize,

    /// Data race detector per-frame data.
    pub data_race: Option<data_race::FrameState>,
}
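// Illustrative sketch of the `salt` mechanism (hypothetical program, not part of this module):
//
//     fn f() -> usize { &0i32 as *const i32 as usize }
//
// Within a single frame the salt is fixed, so repeated evaluations of `&0i32` inside one call
// to `f` agree on an address; two separate calls to `f` get frames with (possibly) different
// salts, so `f() == f()` is not guaranteed.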

impl<'tcx> std::fmt::Debug for FrameExtra<'tcx> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Omitting `timing`, it does not support `Debug`.
        let FrameExtra {
            borrow_tracker,
            catch_unwind,
            timing: _,
            is_user_relevant,
            salt,
            data_race,
        } = self;
        f.debug_struct("FrameExtra")
            .field("borrow_tracker", borrow_tracker)
            .field("catch_unwind", catch_unwind)
            .field("is_user_relevant", is_user_relevant)
            .field("salt", salt)
            .field("data_race", data_race)
            .finish()
    }
}

impl VisitProvenance for FrameExtra<'_> {
    fn visit_provenance(&self, visit: &mut VisitWith<'_>) {
        let FrameExtra {
            catch_unwind,
            borrow_tracker,
            timing: _,
            is_user_relevant: _,
            salt: _,
            data_race: _,
        } = self;

        catch_unwind.visit_provenance(visit);
        borrow_tracker.visit_provenance(visit);
    }
}

/// Extra memory kinds
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum MiriMemoryKind {
    /// `__rust_alloc` memory.
    Rust,
    /// `miri_alloc` memory.
    Miri,
    /// `malloc` memory.
    C,
    /// Windows `HeapAlloc` memory.
    WinHeap,
    /// Windows "local" memory (to be freed with `LocalFree`)
    WinLocal,
    /// Memory for args, errno, env vars, and other parts of the machine-managed environment.
    /// This memory may leak.
    Machine,
    /// Memory allocated by the runtime, e.g. for readdir. Separate from `Machine` because we clean
    /// it up (or expect the user to invoke operations that clean it up) and leak-check it.
    Runtime,
    /// Globals copied from `tcx`.
    /// This memory may leak.
    Global,
    /// Memory for extern statics.
    /// This memory may leak.
    ExternStatic,
    /// Memory for thread-local statics.
    /// This memory may leak.
    Tls,
    /// Memory mapped directly by the program
    Mmap,
}

impl From<MiriMemoryKind> for MemoryKind {
    #[inline(always)]
    fn from(kind: MiriMemoryKind) -> MemoryKind {
        MemoryKind::Machine(kind)
    }
}

impl MayLeak for MiriMemoryKind {
    #[inline(always)]
    fn may_leak(self) -> bool {
        use self::MiriMemoryKind::*;
        match self {
            Rust | Miri | C | WinHeap | WinLocal | Runtime => false,
            Machine | Global | ExternStatic | Tls | Mmap => true,
        }
    }
}

impl MiriMemoryKind {
    /// Whether we have a useful allocation span for an allocation of this kind.
    fn should_save_allocation_span(self) -> bool {
        use self::MiriMemoryKind::*;
        match self {
            // Heap allocations are fine since the `Allocation` is created immediately.
            Rust | Miri | C | WinHeap | WinLocal | Mmap => true,
            // Everything else is unclear, let's not show potentially confusing spans.
            Machine | Global | ExternStatic | Tls | Runtime => false,
        }
    }
}

impl fmt::Display for MiriMemoryKind {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        use self::MiriMemoryKind::*;
        match self {
            Rust => write!(f, "Rust heap"),
            Miri => write!(f, "Miri bare-metal heap"),
            C => write!(f, "C heap"),
            WinHeap => write!(f, "Windows heap"),
            WinLocal => write!(f, "Windows local memory"),
            Machine => write!(f, "machine-managed memory"),
            Runtime => write!(f, "language runtime memory"),
            Global => write!(f, "global (static or const)"),
            ExternStatic => write!(f, "extern static"),
            Tls => write!(f, "thread-local static"),
            Mmap => write!(f, "mmap"),
        }
    }
}

pub type MemoryKind = interpret::MemoryKind<MiriMemoryKind>;

/// Pointer provenance.
// This needs to be `Eq`+`Hash` because the `Machine` trait requires that: validity checking
// *might* be recursive and then it has to track which places have already been visited.
// These implementations are a bit questionable, and it means we may check the same place multiple
// times with different provenance, but that is in general not wrong.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
pub enum Provenance {
    /// For pointers with concrete provenance, we know exactly which allocation they are attached to
    /// and what their borrow tag is.
    Concrete {
        alloc_id: AllocId,
        /// Borrow Tracker tag.
        tag: BorTag,
    },
    /// Pointers with wildcard provenance are created on int-to-ptr casts. According to the
    /// specification, we should at that point angelically "guess" a provenance that will make all
    /// future uses of this pointer work, if at all possible. Of course such a semantics cannot
    /// actually be implemented in Miri. So instead, we approximate this, erring on the side of
    /// accepting too much code rather than rejecting correct code: a pointer with wildcard
    /// provenance "acts like" any previously exposed pointer. Each time it is used, we check
    /// whether *some* exposed pointer could have done what we want to do, and if the answer is yes
    /// then we allow the access. This allows too much code in two ways:
    /// - The same wildcard pointer can "take the role" of multiple different exposed pointers on
    ///   subsequent memory accesses.
    /// - In the aliasing model, we don't just have to know the borrow tag of the pointer used for
    ///   the access, we also have to update the aliasing state -- and that update can be very
    ///   different depending on which borrow tag we pick! Stacked Borrows has support for this by
    ///   switching to a stack that is only approximately known, i.e. we over-approximate the effect
    ///   of using *any* exposed pointer for this access, and only keep information about the borrow
    ///   stack that would be true with all possible choices.
    Wildcard,
}
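// Illustrative sketch of how wildcard provenance arises (hypothetical program, not part of
// this module):
//
//     let x = 42u8;
//     let addr = &x as *const u8 as usize; // exposes the provenance of `x`
//     let p = addr as *const u8;           // int-to-ptr cast: `p` gets `Provenance::Wildcard`
//     let v = unsafe { *p };               // accepted, since *some* exposed pointer (here the
//                                          // one to `x`) could have performed this access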

/// The "extra" information a pointer has over a regular AllocId.
#[derive(Copy, Clone, PartialEq)]
pub enum ProvenanceExtra {
    Concrete(BorTag),
    Wildcard,
}

#[cfg(target_pointer_width = "64")]
static_assert_size!(StrictPointer, 24);
// FIXME: this would fit in 24 bytes, but layout optimizations are not smart enough.
// #[cfg(target_pointer_width = "64")]
//static_assert_size!(Pointer, 24);
#[cfg(target_pointer_width = "64")]
static_assert_size!(Scalar, 32);

impl fmt::Debug for Provenance {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Provenance::Concrete { alloc_id, tag } => {
                // Forward `alternate` flag to `alloc_id` printing.
                if f.alternate() {
                    write!(f, "[{alloc_id:#?}]")?;
                } else {
                    write!(f, "[{alloc_id:?}]")?;
                }
                // Print Borrow Tracker tag.
                write!(f, "{tag:?}")?;
            }
            Provenance::Wildcard => {
                write!(f, "[wildcard]")?;
            }
        }
        Ok(())
    }
}

impl interpret::Provenance for Provenance {
    /// We use absolute addresses in the `offset` of a `StrictPointer`.
    const OFFSET_IS_ADDR: bool = true;

    /// Miri implements wildcard provenance.
    const WILDCARD: Option<Self> = Some(Provenance::Wildcard);

    fn get_alloc_id(self) -> Option<AllocId> {
        match self {
            Provenance::Concrete { alloc_id, .. } => Some(alloc_id),
            Provenance::Wildcard => None,
        }
    }

    fn fmt(ptr: &interpret::Pointer<Self>, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let (prov, addr) = ptr.into_raw_parts(); // offset is absolute address
        write!(f, "{:#x}", addr.bytes())?;
        if f.alternate() {
            write!(f, "{prov:#?}")?;
        } else {
            write!(f, "{prov:?}")?;
        }
        Ok(())
    }

    fn join(left: Option<Self>, right: Option<Self>) -> Option<Self> {
        match (left, right) {
            // If both are the *same* concrete tag, that is the result.
            (
                Some(Provenance::Concrete { alloc_id: left_alloc, tag: left_tag }),
                Some(Provenance::Concrete { alloc_id: right_alloc, tag: right_tag }),
            ) if left_alloc == right_alloc && left_tag == right_tag => left,
            // If one side is a wildcard, the best possible outcome is that it is equal to the other
            // one, and we use that.
            (Some(Provenance::Wildcard), o) | (o, Some(Provenance::Wildcard)) => o,
            // Otherwise, fall back to `None`.
            _ => None,
        }
    }
}
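// For intuition, the case analysis of `join` (sketch; `c1` and `c2` stand for two *different*
// concrete provenances, `w` for `Provenance::Wildcard`):
//
//     join(Some(c1), Some(c1)) == Some(c1)  // identical concrete provenance survives
//     join(Some(w),  Some(c1)) == Some(c1)  // a wildcard defers to the other side
//     join(Some(c1), Some(c2)) == None      // mismatched provenance is lost
//     join(Some(c1), None)     == None      // as is provenance joined with "no provenance"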

impl fmt::Debug for ProvenanceExtra {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            ProvenanceExtra::Concrete(pid) => write!(f, "{pid:?}"),
            ProvenanceExtra::Wildcard => write!(f, "<wildcard>"),
        }
    }
}

impl ProvenanceExtra {
    pub fn and_then<T>(self, f: impl FnOnce(BorTag) -> Option<T>) -> Option<T> {
        match self {
            ProvenanceExtra::Concrete(pid) => f(pid),
            ProvenanceExtra::Wildcard => None,
        }
    }
}
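// Usage sketch (hypothetical call site, not part of this module): run a tag-based computation
// only when the borrow tag is actually known, treating wildcard provenance as "no answer":
//
//     let tag_str: Option<String> = prov_extra.and_then(|tag| Some(format!("{tag:?}")));
//     // `Some(..)` for `ProvenanceExtra::Concrete`, `None` for `ProvenanceExtra::Wildcard`.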

/// Extra per-allocation data
#[derive(Debug)]
pub struct AllocExtra<'tcx> {
    /// Per-allocation state of the borrow tracker, if enabled.
    pub borrow_tracker: Option<borrow_tracker::AllocState>,
    /// Extra state for data race detection.
    ///
    /// Invariant: The enum variant must match the enum variant in the `data_race` field on `MiriMachine`.
    pub data_race: AllocDataRaceHandler,
    /// A backtrace to where this allocation was allocated.
    /// As this is recorded for leak reports, it only exists
    /// if this allocation is leakable. The backtrace is not
    /// pruned yet; that should be done before printing it.
    pub backtrace: Option<Vec<FrameInfo<'tcx>>>,
    /// Synchronization primitives like to attach extra data to particular addresses. We store that
    /// inside the relevant allocation, to ensure that everything is removed when the allocation is
    /// freed.
    /// This maps offsets to synchronization-primitive-specific data.
    pub sync: FxHashMap<Size, Box<dyn Any>>,
}

// We need a `Clone` impl because the machine passes `Allocation` through `Cow`...
// but that should never end up actually cloning our `AllocExtra`.
impl<'tcx> Clone for AllocExtra<'tcx> {
    fn clone(&self) -> Self {
        panic!("our allocations should never be cloned");
    }
}

impl VisitProvenance for AllocExtra<'_> {
    fn visit_provenance(&self, visit: &mut VisitWith<'_>) {
        let AllocExtra { borrow_tracker, data_race, backtrace: _, sync: _ } = self;

        borrow_tracker.visit_provenance(visit);
        data_race.visit_provenance(visit);
    }
}

/// Precomputed layouts of primitive types
pub struct PrimitiveLayouts<'tcx> {
    pub unit: TyAndLayout<'tcx>,
    pub i8: TyAndLayout<'tcx>,
    pub i16: TyAndLayout<'tcx>,
    pub i32: TyAndLayout<'tcx>,
    pub i64: TyAndLayout<'tcx>,
    pub i128: TyAndLayout<'tcx>,
    pub isize: TyAndLayout<'tcx>,
    pub u8: TyAndLayout<'tcx>,
    pub u16: TyAndLayout<'tcx>,
    pub u32: TyAndLayout<'tcx>,
    pub u64: TyAndLayout<'tcx>,
    pub u128: TyAndLayout<'tcx>,
    pub usize: TyAndLayout<'tcx>,
    pub bool: TyAndLayout<'tcx>,
    pub mut_raw_ptr: TyAndLayout<'tcx>,   // *mut ()
    pub const_raw_ptr: TyAndLayout<'tcx>, // *const ()
}

impl<'tcx> PrimitiveLayouts<'tcx> {
    fn new(layout_cx: LayoutCx<'tcx>) -> Result<Self, &'tcx LayoutError<'tcx>> {
        let tcx = layout_cx.tcx();
        let mut_raw_ptr = Ty::new_mut_ptr(tcx, tcx.types.unit);
        let const_raw_ptr = Ty::new_imm_ptr(tcx, tcx.types.unit);
        Ok(Self {
            unit: layout_cx.layout_of(tcx.types.unit)?,
            i8: layout_cx.layout_of(tcx.types.i8)?,
            i16: layout_cx.layout_of(tcx.types.i16)?,
            i32: layout_cx.layout_of(tcx.types.i32)?,
            i64: layout_cx.layout_of(tcx.types.i64)?,
            i128: layout_cx.layout_of(tcx.types.i128)?,
            isize: layout_cx.layout_of(tcx.types.isize)?,
            u8: layout_cx.layout_of(tcx.types.u8)?,
            u16: layout_cx.layout_of(tcx.types.u16)?,
            u32: layout_cx.layout_of(tcx.types.u32)?,
            u64: layout_cx.layout_of(tcx.types.u64)?,
            u128: layout_cx.layout_of(tcx.types.u128)?,
            usize: layout_cx.layout_of(tcx.types.usize)?,
            bool: layout_cx.layout_of(tcx.types.bool)?,
            mut_raw_ptr: layout_cx.layout_of(mut_raw_ptr)?,
            const_raw_ptr: layout_cx.layout_of(const_raw_ptr)?,
        })
    }

    pub fn uint(&self, size: Size) -> Option<TyAndLayout<'tcx>> {
        match size.bits() {
            8 => Some(self.u8),
            16 => Some(self.u16),
            32 => Some(self.u32),
            64 => Some(self.u64),
            128 => Some(self.u128),
            _ => None,
        }
    }

    pub fn int(&self, size: Size) -> Option<TyAndLayout<'tcx>> {
        match size.bits() {
            8 => Some(self.i8),
            16 => Some(self.i16),
            32 => Some(self.i32),
            64 => Some(self.i64),
            128 => Some(self.i128),
            _ => None,
        }
    }
}
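// Usage sketch (hypothetical call site, not part of this module): look up the unsigned integer
// layout matching a given access size, e.g. for a 4-byte (32-bit) access:
//
//     let layout = this.machine.layouts.uint(Size::from_bytes(4))
//         .expect("no unsigned integer layout of this size"); // yields the `u32` layout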

/// The machine itself.
///
/// If you add anything here that stores machine values, remember to update
/// `visit_all_machine_values`!
pub struct MiriMachine<'tcx> {
    // We carry a copy of the global `TyCtxt` for convenience, so methods taking just `&MiriMachine` have `tcx` access.
    pub tcx: TyCtxt<'tcx>,

    /// Global data for borrow tracking.
    pub borrow_tracker: Option<borrow_tracker::GlobalState>,

    /// Depending on settings, this will be `None`,
    /// global data for a data race detector,
    /// or the context required for running in GenMC mode.
    ///
    /// Invariant: The enum variant must match the enum variant of `AllocDataRaceHandler` in the `data_race` field of all `AllocExtra`.
    pub data_race: GlobalDataRaceHandler,

    /// Ptr-int-cast module global data.
    pub alloc_addresses: alloc_addresses::GlobalState,

    /// Environment variables.
    pub(crate) env_vars: EnvVars<'tcx>,

    /// Return place of the main function.
    pub(crate) main_fn_ret_place: Option<MPlaceTy<'tcx>>,

    /// Program arguments (`Option` because we can only initialize them after creating the ecx).
    /// These are *pointers* to argc/argv because of macOS.
    /// We also need the full command line as one string because of Windows.
    pub(crate) argc: Option<Pointer>,
    pub(crate) argv: Option<Pointer>,
    pub(crate) cmd_line: Option<Pointer>,

    /// TLS state.
    pub(crate) tls: TlsData<'tcx>,

    /// What should Miri do when an op requires communicating with the host,
    /// such as accessing host env vars, random number generation, and
    /// file system access.
    pub(crate) isolated_op: IsolatedOp,

    /// Whether to enforce the validity invariant.
    pub(crate) validation: ValidationMode,

    /// The table of file descriptors.
    pub(crate) fds: shims::FdTable,
    /// The table of directory descriptors.
    pub(crate) dirs: shims::DirTable,

    /// The list of all EpollEventInterest.
    pub(crate) epoll_interests: shims::EpollInterestTable,

    /// This machine's monotonic clock.
    pub(crate) monotonic_clock: MonotonicClock,

    /// The set of threads.
    pub(crate) threads: ThreadManager<'tcx>,

    /// Stores which thread is eligible to run on which CPUs.
    /// This has no effect at all, it is just tracked to produce the correct result
    /// in `sched_getaffinity`.
    pub(crate) thread_cpu_affinity: FxHashMap<ThreadId, CpuAffinityMask>,

    /// Precomputed `TyAndLayout`s for primitive data types that are commonly used inside Miri.
    pub(crate) layouts: PrimitiveLayouts<'tcx>,

    /// Allocations that are considered roots of static memory (that may leak).
    pub(crate) static_roots: Vec<AllocId>,

    /// The `measureme` profiler used to record timing information about
    /// the emulated program.
    profiler: Option<measureme::Profiler>,
    /// Used with `profiler` to cache the `StringId`s for event names
    /// used with `measureme`.
    string_cache: FxHashMap<String, measureme::StringId>,

    /// Cache of `Instance` exported under the given `Symbol` name.
    /// `None` means no `Instance` is exported under the given name.
    pub(crate) exported_symbols_cache: FxHashMap<Symbol, Option<Instance<'tcx>>>,

    /// The equivalent of the `RUST_BACKTRACE` setting: how to report backtraces on encountering an error.
    pub(crate) backtrace_style: BacktraceStyle,

    /// Crates which are considered local for the purposes of error reporting.
    pub(crate) local_crates: Vec<CrateNum>,

    /// Mapping extern static names to their pointer.
    extern_statics: FxHashMap<Symbol, StrictPointer>,

    /// The random number generator used for resolving non-determinism.
    /// Needs to be queried by `ptr_to_int`, hence needs interior mutability.
    pub(crate) rng: RefCell<StdRng>,

    /// The allocator used for the machine's `AllocBytes` in native-libs mode.
    #[cfg(target_os = "linux")]
    pub(crate) allocator: Option<Rc<RefCell<crate::alloc::isolated_alloc::IsolatedAlloc>>>,

    /// The allocation IDs to report when they are being allocated
    /// (helps for debugging memory leaks and use-after-free bugs).
    tracked_alloc_ids: FxHashSet<AllocId>,
    /// For the tracked alloc ids, also report read/write accesses.
    track_alloc_accesses: bool,

    /// Controls whether alignment of memory accesses is being checked.
    pub(crate) check_alignment: AlignmentCheck,

    /// Failure rate of compare_exchange_weak, between 0.0 and 1.0.
    pub(crate) cmpxchg_weak_failure_rate: f64,

    /// The probability of the active thread being preempted at the end of each basic block.
    pub(crate) preemption_rate: f64,

    /// If `Some`, we will report the current stack every N basic blocks.
    pub(crate) report_progress: Option<u32>,
    /// The total number of blocks that have been executed.
    pub(crate) basic_block_count: u64,

    /// Handles of the optional shared object files for native functions.
    #[cfg(unix)]
    pub native_lib: Vec<(libloading::Library, std::path::PathBuf)>,
    #[cfg(not(unix))]
    pub native_lib: Vec<!>,

    /// Run a garbage collector for BorTags every N basic blocks.
    pub(crate) gc_interval: u32,
    /// The number of blocks that passed since the last BorTag GC pass.
    pub(crate) since_gc: u32,

    /// The number of CPUs to be reported by Miri.
    pub(crate) num_cpus: u32,

    /// Determines Miri's page size and associated values.
    pub(crate) page_size: u64,
    pub(crate) stack_addr: u64,
    pub(crate) stack_size: u64,

    /// Whether to collect a backtrace when each allocation is created, just in case it leaks.
    pub(crate) collect_leak_backtraces: bool,

    /// The spans we will use to report where an allocation was created and deallocated in
    /// diagnostics.
    pub(crate) allocation_spans: RefCell<FxHashMap<AllocId, (Span, Option<Span>)>>,

    /// Maps MIR consts to their evaluated result. We combine the const with a "salt" (`usize`)
    /// that is fixed per stack frame; this lets us sometimes have different results for the
    /// same const while ensuring consistent results within a single call.
    const_cache: RefCell<FxHashMap<(mir::Const<'tcx>, usize), OpTy<'tcx>>>,

    /// For each allocation, an offset inside that allocation that was deemed aligned even for
    /// symbolic alignment checks. This cannot be stored in `AllocExtra` since it needs to be
    /// tracked for vtables and function allocations as well as regular allocations.
    ///
    /// Invariant: the promised alignment will never be less than the native alignment of the
    /// allocation.
    pub(crate) symbolic_alignment: RefCell<FxHashMap<AllocId, (Size, Align)>>,

    /// A cache of "data range" computations for unions (i.e., the offsets of non-padding bytes).
    union_data_ranges: FxHashMap<Ty<'tcx>, RangeSet>,

    /// Caches the sanity-checks for various pthread primitives.
    pub(crate) pthread_mutex_sanity: Cell<bool>,
    pub(crate) pthread_rwlock_sanity: Cell<bool>,
    pub(crate) pthread_condvar_sanity: Cell<bool>,

    /// Remembers whether we already warned about an extern type with Stacked Borrows.
    pub(crate) sb_extern_type_warned: Cell<bool>,
    /// Remembers whether we already warned about sharing memory with a native call.
    #[cfg(unix)]
    pub(crate) native_call_mem_warned: Cell<bool>,
    /// Remembers which shims have already shown the warning about erroring in isolation.
    pub(crate) reject_in_isolation_warned: RefCell<FxHashSet<String>>,
    /// Remembers which int2ptr casts we have already warned about.
    pub(crate) int2ptr_warned: RefCell<FxHashSet<Span>>,

    /// Cache for `mangle_internal_symbol`.
    pub(crate) mangle_internal_symbol_cache: FxHashMap<&'static str, String>,

    /// Always prefer the intrinsic fallback body over the native Miri implementation.
    pub force_intrinsic_fallback: bool,

    /// Whether floating-point operations can behave non-deterministically.
    pub float_nondet: bool,
}

impl<'tcx> MiriMachine<'tcx> {
    pub(crate) fn new(
        config: &MiriConfig,
        layout_cx: LayoutCx<'tcx>,
        genmc_ctx: Option<Rc<GenmcCtx>>,
    ) -> Self {
        let tcx = layout_cx.tcx();
        let local_crates = helpers::get_local_crates(tcx);
        let layouts =
            PrimitiveLayouts::new(layout_cx).expect("Couldn't get layouts of primitive types");
        let profiler = config.measureme_out.as_ref().map(|out| {
            let crate_name =
                tcx.sess.opts.crate_name.clone().unwrap_or_else(|| "unknown-crate".to_string());
            let pid = process::id();
            // We adopt the same naming scheme for the profiler output that rustc uses. In rustc,
            // the PID is padded so that the nondeterministic value of the PID does not spread
            // nondeterminism to the allocator. In Miri we are not aiming for such performance
            // control, we just pad for consistency with rustc.
            let filename = format!("{crate_name}-{pid:07}");
            let path = Path::new(out).join(filename);
            measureme::Profiler::new(path).expect("Couldn't create `measureme` profiler")
        });
        let rng = StdRng::seed_from_u64(config.seed.unwrap_or(0));
        let borrow_tracker = config.borrow_tracker.map(|bt| bt.instantiate_global_state(config));
        let data_race = if config.genmc_mode {
            // `genmc_ctx` persists across executions, so we don't create a new one here.
            GlobalDataRaceHandler::Genmc(genmc_ctx.unwrap())
        } else if config.data_race_detector {
            GlobalDataRaceHandler::Vclocks(Box::new(data_race::GlobalState::new(config)))
        } else {
            GlobalDataRaceHandler::None
        };
        // Determine page size, stack address, and stack size.
        // These values are mostly meaningless, but the stack address is also where we start
        // allocating physical integer addresses for all allocations.
        let page_size = if let Some(page_size) = config.page_size {
            page_size
        } else {
            let target = &tcx.sess.target;
            match target.arch.as_ref() {
                "wasm32" | "wasm64" => 64 * 1024, // https://webassembly.github.io/spec/core/exec/runtime.html#memory-instances
                "aarch64" => {
                    if target.options.vendor.as_ref() == "apple" {
                        // No "definitive" source, but see:
                        // https://www.wwdcnotes.com/notes/wwdc20/10214/
                        // https://github.com/ziglang/zig/issues/11308 etc.
                        16 * 1024
                    } else {
                        4 * 1024
                    }
                }
                _ => 4 * 1024,
            }
        };
        // On 16-bit targets, 32 pages is more than the entire address space!
        let stack_addr = if tcx.pointer_size().bits() < 32 { page_size } else { page_size * 32 };
        let stack_size =
            if tcx.pointer_size().bits() < 32 { page_size * 4 } else { page_size * 16 };
        assert!(
            usize::try_from(config.num_cpus).unwrap() <= cpu_affinity::MAX_CPUS,
            "miri only supports up to {} CPUs, but {} were configured",
            cpu_affinity::MAX_CPUS,
            config.num_cpus
        );
        let threads = ThreadManager::new(config);
        let mut thread_cpu_affinity = FxHashMap::default();
        if matches!(&*tcx.sess.target.os, "linux" | "freebsd" | "android") {
            thread_cpu_affinity
                .insert(threads.active_thread(), CpuAffinityMask::new(&layout_cx, config.num_cpus));
        }
        MiriMachine {
            tcx,
            borrow_tracker,
            data_race,
            alloc_addresses: RefCell::new(alloc_addresses::GlobalStateInner::new(config, stack_addr)),
            // `env_vars` depends on a full interpreter so we cannot properly initialize it yet.
            env_vars: EnvVars::default(),
            main_fn_ret_place: None,
            argc: None,
            argv: None,
            cmd_line: None,
            tls: TlsData::default(),
            isolated_op: config.isolated_op,
            validation: config.validation,
            fds: shims::FdTable::init(config.mute_stdout_stderr),
            epoll_interests: shims::EpollInterestTable::new(),
            dirs: Default::default(),
            layouts,
            threads,
            thread_cpu_affinity,
            static_roots: Vec::new(),
            profiler,
            string_cache: Default::default(),
            exported_symbols_cache: FxHashMap::default(),
            backtrace_style: config.backtrace_style,
            local_crates,
            extern_statics: FxHashMap::default(),
            rng: RefCell::new(rng),
            #[cfg(target_os = "linux")]
            allocator: if !config.native_lib.is_empty() {
                Some(Rc::new(RefCell::new(crate::alloc::isolated_alloc::IsolatedAlloc::new())))
            } else { None },
            tracked_alloc_ids: config.tracked_alloc_ids.clone(),
            track_alloc_accesses: config.track_alloc_accesses,
            check_alignment: config.check_alignment,
            cmpxchg_weak_failure_rate: config.cmpxchg_weak_failure_rate,
            preemption_rate: config.preemption_rate,
            report_progress: config.report_progress,
            basic_block_count: 0,
            monotonic_clock: MonotonicClock::new(config.isolated_op == IsolatedOp::Allow),
            #[cfg(unix)]
            native_lib: config.native_lib.iter().map(|lib_file_path| {
                let host_triple = rustc_session::config::host_tuple();
                let target_triple = tcx.sess.opts.target_triple.tuple();
                // Check that the host triple matches the target triple.
                if host_triple != target_triple {
                    panic!(
                        "calling native C functions in linked .so file requires host and target to be the same: \
                        host={host_triple}, target={target_triple}",
                    );
                }
                // Note: it is the user's responsibility to provide a correct SO file.
                // WATCH OUT: If an invalid/incorrect SO file is specified, this can cause
                // undefined behaviour in Miri itself!
                (
                    unsafe {
                        libloading::Library::new(lib_file_path)
                            .expect("failed to read specified extern shared object file")
                    },
                    lib_file_path.clone(),
                )
            }).collect(),
            #[cfg(not(unix))]
            native_lib: config.native_lib.iter().map(|_| {
                panic!("calling functions from native libraries via FFI is only supported on Unix")
            }).collect(),
            gc_interval: config.gc_interval,
            since_gc: 0,
            num_cpus: config.num_cpus,
            page_size,
            stack_addr,
            stack_size,
            collect_leak_backtraces: config.collect_leak_backtraces,
            allocation_spans: RefCell::new(FxHashMap::default()),
            const_cache: RefCell::new(FxHashMap::default()),
            symbolic_alignment: RefCell::new(FxHashMap::default()),
            union_data_ranges: FxHashMap::default(),
            pthread_mutex_sanity: Cell::new(false),
            pthread_rwlock_sanity: Cell::new(false),
            pthread_condvar_sanity: Cell::new(false),
            sb_extern_type_warned: Cell::new(false),
            #[cfg(unix)]
            native_call_mem_warned: Cell::new(false),
            reject_in_isolation_warned: Default::default(),
            int2ptr_warned: Default::default(),
            mangle_internal_symbol_cache: Default::default(),
            force_intrinsic_fallback: config.force_intrinsic_fallback,
            float_nondet: config.float_nondet,
        }
    }

    pub(crate) fn late_init(
        ecx: &mut MiriInterpCx<'tcx>,
        config: &MiriConfig,
        on_main_stack_empty: StackEmptyCallback<'tcx>,
    ) -> InterpResult<'tcx> {
        EnvVars::init(ecx, config)?;
        MiriMachine::init_extern_statics(ecx)?;
        ThreadManager::init(ecx, on_main_stack_empty);
        interp_ok(())
    }

    pub(crate) fn add_extern_static(ecx: &mut MiriInterpCx<'tcx>, name: &str, ptr: Pointer) {
        // This was just allocated, so there is definitely a pointer here.
        let ptr = ptr.into_pointer_or_addr().unwrap();
        ecx.machine.extern_statics.try_insert(Symbol::intern(name), ptr).unwrap();
    }

    pub(crate) fn communicate(&self) -> bool {
        self.isolated_op == IsolatedOp::Allow
    }

    /// Check whether the stack frame that this `FrameInfo` refers to is part of a local crate.
    pub(crate) fn is_local(&self, frame: &FrameInfo<'_>) -> bool {
        let def_id = frame.instance.def_id();
        def_id.is_local() || self.local_crates.contains(&def_id.krate)
    }

    /// Called when the interpreter is going to shut down abnormally, such as due to a Ctrl-C.
    pub(crate) fn handle_abnormal_termination(&mut self) {
        // All strings in the profile data are stored in a single string table which is not
        // written to disk until the profiler is dropped. If the interpreter exits without dropping
        // the profiler, it is not possible to interpret the profile data and all measureme tools
        // will panic when given the file.
        drop(self.profiler.take());
    }

    pub(crate) fn page_align(&self) -> Align {
        Align::from_bytes(self.page_size).unwrap()
    }

    pub(crate) fn allocated_span(&self, alloc_id: AllocId) -> Option<SpanData> {
        self.allocation_spans
            .borrow()
            .get(&alloc_id)
            .map(|(allocated, _deallocated)| allocated.data())
    }

    pub(crate) fn deallocated_span(&self, alloc_id: AllocId) -> Option<SpanData> {
        self.allocation_spans
            .borrow()
            .get(&alloc_id)
            .and_then(|(_allocated, deallocated)| *deallocated)
            .map(Span::data)
    }

    fn init_allocation(
        ecx: &MiriInterpCx<'tcx>,
        id: AllocId,
        kind: MemoryKind,
        size: Size,
        align: Align,
    ) -> InterpResult<'tcx, AllocExtra<'tcx>> {
        if ecx.machine.tracked_alloc_ids.contains(&id) {
            ecx.emit_diagnostic(NonHaltingDiagnostic::CreatedAlloc(id, size, align, kind));
        }

        let borrow_tracker = ecx
            .machine
            .borrow_tracker
            .as_ref()
            .map(|bt| bt.borrow_mut().new_allocation(id, size, kind, &ecx.machine));

        let data_race = match &ecx.machine.data_race {
            GlobalDataRaceHandler::None => AllocDataRaceHandler::None,
            GlobalDataRaceHandler::Vclocks(data_race) =>
                AllocDataRaceHandler::Vclocks(
                    data_race::AllocState::new_allocation(
                        data_race,
                        &ecx.machine.threads,
                        size,
                        kind,
                        ecx.machine.current_span(),
                    ),
                    data_race.weak_memory.then(weak_memory::AllocState::new_allocation),
                ),
            GlobalDataRaceHandler::Genmc(_genmc_ctx) => {
                // GenMC learns about new allocations directly from the alloc_addresses module,
                // since it has to be able to control the address at which they are placed.
                AllocDataRaceHandler::Genmc
            }
        };

        // If an allocation is leaked, we want to report a backtrace to indicate where it was
        // allocated. We don't need to record a backtrace for allocations which are allowed to
        // leak.
        let backtrace = if kind.may_leak() || !ecx.machine.collect_leak_backtraces {
            None
        } else {
            Some(ecx.generate_stacktrace())
        };

        if matches!(kind, MemoryKind::Machine(kind) if kind.should_save_allocation_span()) {
            ecx.machine
                .allocation_spans
                .borrow_mut()
                .insert(id, (ecx.machine.current_span(), None));
        }

        interp_ok(AllocExtra { borrow_tracker, data_race, backtrace, sync: FxHashMap::default() })
    }
}

impl VisitProvenance for MiriMachine<'_> {
    fn visit_provenance(&self, visit: &mut VisitWith<'_>) {
        #[rustfmt::skip]
        let MiriMachine {
            threads,
            thread_cpu_affinity: _,
            tls,
            env_vars,
            main_fn_ret_place,
            argc,
            argv,
            cmd_line,
            extern_statics,
            dirs,
            borrow_tracker,
            data_race,
            alloc_addresses,
            fds,
            epoll_interests: _,
            tcx: _,
            isolated_op: _,
            validation: _,
            monotonic_clock: _,
            layouts: _,
            static_roots: _,
            profiler: _,
            string_cache: _,
            exported_symbols_cache: _,
            backtrace_style: _,
            local_crates: _,
            rng: _,
            #[cfg(target_os = "linux")]
            allocator: _,
            tracked_alloc_ids: _,
            track_alloc_accesses: _,
            check_alignment: _,
            cmpxchg_weak_failure_rate: _,
            preemption_rate: _,
            report_progress: _,
            basic_block_count: _,
            native_lib: _,
            gc_interval: _,
            since_gc: _,
            num_cpus: _,
            page_size: _,
            stack_addr: _,
            stack_size: _,
            collect_leak_backtraces: _,
            allocation_spans: _,
            const_cache: _,
            symbolic_alignment: _,
            union_data_ranges: _,
            pthread_mutex_sanity: _,
            pthread_rwlock_sanity: _,
            pthread_condvar_sanity: _,
            sb_extern_type_warned: _,
            #[cfg(unix)]
            native_call_mem_warned: _,
            reject_in_isolation_warned: _,
            int2ptr_warned: _,
            mangle_internal_symbol_cache: _,
            force_intrinsic_fallback: _,
            float_nondet: _,
        } = self;

        threads.visit_provenance(visit);
        tls.visit_provenance(visit);
        env_vars.visit_provenance(visit);
        dirs.visit_provenance(visit);
        fds.visit_provenance(visit);
        data_race.visit_provenance(visit);
        borrow_tracker.visit_provenance(visit);
        alloc_addresses.visit_provenance(visit);
        main_fn_ret_place.visit_provenance(visit);
        argc.visit_provenance(visit);
        argv.visit_provenance(visit);
        cmd_line.visit_provenance(visit);
        for ptr in extern_statics.values() {
            ptr.visit_provenance(visit);
        }
    }
}

/// A rustc InterpCx for Miri.
pub type MiriInterpCx<'tcx> = InterpCx<'tcx, MiriMachine<'tcx>>;

/// A little trait that's useful to be inherited by extension traits.
pub trait MiriInterpCxExt<'tcx> {
    fn eval_context_ref<'a>(&'a self) -> &'a MiriInterpCx<'tcx>;
    fn eval_context_mut<'a>(&'a mut self) -> &'a mut MiriInterpCx<'tcx>;
}
impl<'tcx> MiriInterpCxExt<'tcx> for MiriInterpCx<'tcx> {
    #[inline(always)]
    fn eval_context_ref(&self) -> &MiriInterpCx<'tcx> {
        self
    }
    #[inline(always)]
    fn eval_context_mut(&mut self) -> &mut MiriInterpCx<'tcx> {
        self
    }
}

/// Machine hook implementations.
impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
    type MemoryKind = MiriMemoryKind;
    type ExtraFnVal = DynSym;

    type FrameExtra = FrameExtra<'tcx>;
    type AllocExtra = AllocExtra<'tcx>;

    type Provenance = Provenance;
    type ProvenanceExtra = ProvenanceExtra;
    type Bytes = MiriAllocBytes;

    type MemoryMap =
        MonoHashMap<AllocId, (MemoryKind, Allocation<Provenance, Self::AllocExtra, Self::Bytes>)>;

    const GLOBAL_KIND: Option<MiriMemoryKind> = Some(MiriMemoryKind::Global);

    const PANIC_ON_ALLOC_FAIL: bool = false;

    const TRACING_ENABLED: bool = cfg!(feature = "tracing");

    #[inline(always)]
    fn enforce_alignment(ecx: &MiriInterpCx<'tcx>) -> bool {
        ecx.machine.check_alignment != AlignmentCheck::None
    }

    #[inline(always)]
    fn alignment_check(
        ecx: &MiriInterpCx<'tcx>,
        alloc_id: AllocId,
        alloc_align: Align,
        alloc_kind: AllocKind,
        offset: Size,
        align: Align,
    ) -> Option<Misalignment> {
        if ecx.machine.check_alignment != AlignmentCheck::Symbolic {
            // Just use the built-in check.
            return None;
        }
        if alloc_kind != AllocKind::LiveData {
            // Can't have any extra info here.
            return None;
        }
        // Let's see which alignment we have been promised for this allocation.
        let (promised_offset, promised_align) = ecx
            .machine
            .symbolic_alignment
            .borrow()
            .get(&alloc_id)
            .copied()
            .unwrap_or((Size::ZERO, alloc_align));
        if promised_align < align {
            // Definitely not enough.
            Some(Misalignment { has: promised_align, required: align })
        } else {
            // What's the offset between us and the promised alignment?
            let distance = offset.bytes().wrapping_sub(promised_offset.bytes());
            // That must also be aligned.
            if distance.is_multiple_of(align.bytes()) {
                // All looking good!
                None
            } else {
                // The biggest power of two through which `distance` is divisible.
                let distance_pow2 = 1 << distance.trailing_zeros();
                Some(Misalignment {
                    has: Align::from_bytes(distance_pow2).unwrap(),
                    required: align,
                })
            }
        }
    }
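    // Worked example (illustrative): suppose the promised alignment for this allocation is
    // `(Size::ZERO, 32)` and an access at `offset == 20` requires `align == 16`. Then
    // `promised_align (32) >= align (16)`, `distance == 20`, and `20 % 16 != 0`, so we compute
    // `distance_pow2 == 1 << 20u64.trailing_zeros() == 4` and report
    // `Misalignment { has: 4, required: 16 }`.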

    #[inline(always)]
    fn enforce_validity(ecx: &MiriInterpCx<'tcx>, _layout: TyAndLayout<'tcx>) -> bool {
        ecx.machine.validation != ValidationMode::No
    }
    #[inline(always)]
    fn enforce_validity_recursively(
        ecx: &InterpCx<'tcx, Self>,
        _layout: TyAndLayout<'tcx>,
    ) -> bool {
        ecx.machine.validation == ValidationMode::Deep
    }

    #[inline(always)]
    fn ignore_optional_overflow_checks(ecx: &MiriInterpCx<'tcx>) -> bool {
        !ecx.tcx.sess.overflow_checks()
    }

    fn check_fn_target_features(
        ecx: &MiriInterpCx<'tcx>,
        instance: ty::Instance<'tcx>,
    ) -> InterpResult<'tcx> {
        let attrs = ecx.tcx.codegen_fn_attrs(instance.def_id());
        if attrs
            .target_features
            .iter()
            .any(|feature| !ecx.tcx.sess.target_features.contains(&feature.name))
        {
            let unavailable = attrs
                .target_features
                .iter()
                .filter(|&feature| {
                    !feature.implied && !ecx.tcx.sess.target_features.contains(&feature.name)
                })
                .fold(String::new(), |mut s, feature| {
                    if !s.is_empty() {
                        s.push_str(", ");
                    }
                    s.push_str(feature.name.as_str());
                    s
                });
            let msg = format!(
                "calling a function that requires unavailable target features: {unavailable}"
            );
            // On WASM, this is not UB, but instead gets rejected during validation of the module
            // (see #84988).
            if ecx.tcx.sess.target.is_like_wasm {
                throw_machine_stop!(TerminationInfo::Abort(msg));
            } else {
                throw_ub_format!("{msg}");
            }
        }
        interp_ok(())
    }

    #[inline(always)]
    fn find_mir_or_eval_fn(
        ecx: &mut MiriInterpCx<'tcx>,
        instance: ty::Instance<'tcx>,
        abi: &FnAbi<'tcx, Ty<'tcx>>,
        args: &[FnArg<'tcx, Provenance>],
        dest: &PlaceTy<'tcx>,
        ret: Option<mir::BasicBlock>,
        unwind: mir::UnwindAction,
    ) -> InterpResult<'tcx, Option<(&'tcx mir::Body<'tcx>, ty::Instance<'tcx>)>> {
        // For foreign items, try to see if we can emulate them.
        if ecx.tcx.is_foreign_item(instance.def_id()) {
            // An external function call that does not have a MIR body. We either find MIR elsewhere
            // or emulate its effect.
            // This will be `Ok(None)` if we're emulating the call entirely within Miri (no need
            // to run extra MIR), and `Ok(Some(body))` if we found MIR to run for the
            // foreign function.
            // Any needed call to `goto_block` will be performed by `emulate_foreign_item`.
            let args = ecx.copy_fn_args(args); // FIXME: Should `InPlace` arguments be reset to uninit?
            let link_name = Symbol::intern(ecx.tcx.symbol_name(instance).name);
            return ecx.emulate_foreign_item(link_name, abi, &args, dest, ret, unwind);
        }

        // Otherwise, load the MIR.
        interp_ok(Some((ecx.load_mir(instance.def, None)?, instance)))
    }

    #[inline(always)]
    fn call_extra_fn(
        ecx: &mut MiriInterpCx<'tcx>,
        fn_val: DynSym,
        abi: &FnAbi<'tcx, Ty<'tcx>>,
        args: &[FnArg<'tcx, Provenance>],
        dest: &PlaceTy<'tcx>,
        ret: Option<mir::BasicBlock>,
        unwind: mir::UnwindAction,
    ) -> InterpResult<'tcx> {
        let args = ecx.copy_fn_args(args); // FIXME: Should `InPlace` arguments be reset to uninit?
        ecx.emulate_dyn_sym(fn_val, abi, &args, dest, ret, unwind)
    }

    #[inline(always)]
    fn call_intrinsic(
        ecx: &mut MiriInterpCx<'tcx>,
        instance: ty::Instance<'tcx>,
        args: &[OpTy<'tcx>],
        dest: &PlaceTy<'tcx>,
        ret: Option<mir::BasicBlock>,
        unwind: mir::UnwindAction,
    ) -> InterpResult<'tcx, Option<ty::Instance<'tcx>>> {
        ecx.call_intrinsic(instance, args, dest, ret, unwind)
    }

    #[inline(always)]
    fn assert_panic(
        ecx: &mut MiriInterpCx<'tcx>,
        msg: &mir::AssertMessage<'tcx>,
        unwind: mir::UnwindAction,
    ) -> InterpResult<'tcx> {
        ecx.assert_panic(msg, unwind)
    }

    fn panic_nounwind(ecx: &mut InterpCx<'tcx, Self>, msg: &str) -> InterpResult<'tcx> {
        ecx.start_panic_nounwind(msg)
    }

    fn unwind_terminate(
        ecx: &mut InterpCx<'tcx, Self>,
        reason: mir::UnwindTerminateReason,
    ) -> InterpResult<'tcx> {
        // Call the lang item.
        let panic = ecx.tcx.lang_items().get(reason.lang_item()).unwrap();
        let panic = ty::Instance::mono(ecx.tcx.tcx, panic);
        ecx.call_function(
            panic,
            ExternAbi::Rust,
            &[],
            None,
            ReturnContinuation::Goto { ret: None, unwind: mir::UnwindAction::Unreachable },
        )?;
        interp_ok(())
    }

    #[inline(always)]
    fn binary_ptr_op(
        ecx: &MiriInterpCx<'tcx>,
        bin_op: mir::BinOp,
        left: &ImmTy<'tcx>,
        right: &ImmTy<'tcx>,
    ) -> InterpResult<'tcx, ImmTy<'tcx>> {
        ecx.binary_ptr_op(bin_op, left, right)
    }

    #[inline(always)]
    fn generate_nan<F1: Float + FloatConvert<F2>, F2: Float>(
        ecx: &InterpCx<'tcx, Self>,
        inputs: &[F1],
    ) -> F2 {
        ecx.generate_nan(inputs)
    }

    #[inline(always)]
    fn apply_float_nondet(
        ecx: &mut InterpCx<'tcx, Self>,
        val: ImmTy<'tcx>,
    ) -> InterpResult<'tcx, ImmTy<'tcx>> {
        crate::math::apply_random_float_error_to_imm(ecx, val, 2 /* log2(4) */)
    }

    #[inline(always)]
    fn equal_float_min_max<F: Float>(ecx: &MiriInterpCx<'tcx>, a: F, b: F) -> F {
        ecx.equal_float_min_max(a, b)
    }

    #[inline(always)]
    fn ub_checks(ecx: &InterpCx<'tcx, Self>) -> InterpResult<'tcx, bool> {
        interp_ok(ecx.tcx.sess.ub_checks())
    }

    #[inline(always)]
    fn contract_checks(ecx: &InterpCx<'tcx, Self>) -> InterpResult<'tcx, bool> {
        interp_ok(ecx.tcx.sess.contract_checks())
    }

    #[inline(always)]
    fn thread_local_static_pointer(
        ecx: &mut MiriInterpCx<'tcx>,
        def_id: DefId,
    ) -> InterpResult<'tcx, StrictPointer> {
        ecx.get_or_create_thread_local_alloc(def_id)
    }

    fn extern_static_pointer(
        ecx: &MiriInterpCx<'tcx>,
        def_id: DefId,
    ) -> InterpResult<'tcx, StrictPointer> {
        let link_name = Symbol::intern(ecx.tcx.symbol_name(Instance::mono(*ecx.tcx, def_id)).name);
        if let Some(&ptr) = ecx.machine.extern_statics.get(&link_name) {
            // Various parts of the engine rely on `get_alloc_info` for size and alignment
            // information. That uses the type information of this static.
            // Make sure it matches the Miri allocation for this.
            let Provenance::Concrete { alloc_id, .. } = ptr.provenance else {
                panic!("extern_statics cannot contain wildcards")
            };
            let info = ecx.get_alloc_info(alloc_id);
            let def_ty = ecx.tcx.type_of(def_id).instantiate_identity();
            let extern_decl_layout =
                ecx.tcx.layout_of(ecx.typing_env().as_query_input(def_ty)).unwrap();
            if extern_decl_layout.size != info.size || extern_decl_layout.align.abi != info.align {
                throw_unsup_format!(
                    "extern static `{link_name}` has been declared as `{krate}::{name}` \
                    with a size of {decl_size} bytes and alignment of {decl_align} bytes, \
                    but Miri emulates it via an extern static shim \
                    with a size of {shim_size} bytes and alignment of {shim_align} bytes",
                    name = ecx.tcx.def_path_str(def_id),
                    krate = ecx.tcx.crate_name(def_id.krate),
                    decl_size = extern_decl_layout.size.bytes(),
                    decl_align = extern_decl_layout.align.abi.bytes(),
                    shim_size = info.size.bytes(),
                    shim_align = info.align.bytes(),
                )
            }
            interp_ok(ptr)
        } else {
            throw_unsup_format!("extern static `{link_name}` is not supported by Miri")
        }
    }

    fn init_local_allocation(
        ecx: &MiriInterpCx<'tcx>,
        id: AllocId,
        kind: MemoryKind,
        size: Size,
        align: Align,
    ) -> InterpResult<'tcx, Self::AllocExtra> {
        assert!(kind != MiriMemoryKind::Global.into());
        MiriMachine::init_allocation(ecx, id, kind, size, align)
    }

    fn adjust_alloc_root_pointer(
        ecx: &MiriInterpCx<'tcx>,
        ptr: interpret::Pointer<CtfeProvenance>,
        kind: Option<MemoryKind>,
    ) -> InterpResult<'tcx, interpret::Pointer<Provenance>> {
        let kind = kind.expect("we set our GLOBAL_KIND so this cannot be None");
        let alloc_id = ptr.provenance.alloc_id();
        if cfg!(debug_assertions) {
            // The machine promises to never call us on thread-local or extern statics.
            match ecx.tcx.try_get_global_alloc(alloc_id) {
                Some(GlobalAlloc::Static(def_id)) if ecx.tcx.is_thread_local_static(def_id) => {
                    panic!("adjust_alloc_root_pointer called on thread-local static")
                }
                Some(GlobalAlloc::Static(def_id)) if ecx.tcx.is_foreign_item(def_id) => {
                    panic!("adjust_alloc_root_pointer called on extern static")
                }
                _ => {}
            }
        }
        // FIXME: can we somehow preserve the immutability of `ptr`?
        let tag = if let Some(borrow_tracker) = &ecx.machine.borrow_tracker {
            borrow_tracker.borrow_mut().root_ptr_tag(alloc_id, &ecx.machine)
        } else {
            // Value does not matter, SB is disabled.
            BorTag::default()
        };
        ecx.adjust_alloc_root_pointer(ptr, tag, kind)
    }
1331
1332    /// Called on `usize as ptr` casts.
1333    #[inline(always)]
1334    fn ptr_from_addr_cast(ecx: &MiriInterpCx<'tcx>, addr: u64) -> InterpResult<'tcx, Pointer> {
1335        ecx.ptr_from_addr_cast(addr)
1336    }
1337
1338    /// Called on `ptr as usize` casts.
1339    /// (Actually computing the resulting `usize` doesn't need machine help,
1340    /// that's just `Scalar::try_to_int`.)
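    /// For example, `let addr = p as usize;` marks the provenance of `p` as exposed,
    /// so that a pointer later created from an integer may be allowed to access this
    /// allocation.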
1341    #[inline(always)]
1342    fn expose_provenance(
1343        ecx: &InterpCx<'tcx, Self>,
1344        provenance: Self::Provenance,
1345    ) -> InterpResult<'tcx> {
1346        ecx.expose_provenance(provenance)
1347    }
1348
1349    /// Convert a pointer with provenance into an allocation-offset pair and extra provenance info.
1350    /// `size` says how many bytes of memory are expected at that pointer. The *sign* of `size` can
1351    /// be used to disambiguate situations where a wildcard pointer sits right in between two
1352    /// allocations.
1353    ///
1354    /// If `ptr.provenance.get_alloc_id()` is `Some(p)`, the returned `AllocId` must be `p`.
1355    /// The resulting `AllocId` will just be used for that one step and then forgotten again
1356    /// (i.e., we'll never turn the data returned here back into a `Pointer` that might be
1357    /// stored in machine state).
1358    ///
1359    /// When this fails, that means the pointer does not point to a live allocation.
1360    fn ptr_get_alloc(
1361        ecx: &MiriInterpCx<'tcx>,
1362        ptr: StrictPointer,
1363        size: i64,
1364    ) -> Option<(AllocId, Size, Self::ProvenanceExtra)> {
1365        let rel = ecx.ptr_get_alloc(ptr, size);
1366
1367        rel.map(|(alloc_id, size)| {
1368            let tag = match ptr.provenance {
1369                Provenance::Concrete { tag, .. } => ProvenanceExtra::Concrete(tag),
1370                Provenance::Wildcard => ProvenanceExtra::Wildcard,
1371            };
1372            (alloc_id, size, tag)
1373        })
1374    }
1375
1376    /// Called to adjust global allocations to the Provenance and AllocExtra of this machine.
1377    ///
1378    /// If `alloc` contains pointers, then they are all pointing to globals.
1379    ///
1380    /// This should avoid copying if no work has to be done! If this returns an owned
1381    /// allocation (because a copy had to be done to adjust things), machine memory will
1382    /// cache the result. (This relies on `AllocMap::get_or` being able to add the
1383    /// owned allocation to the map even when the map is shared.)
1384    fn adjust_global_allocation<'b>(
1385        ecx: &InterpCx<'tcx, Self>,
1386        id: AllocId,
1387        alloc: &'b Allocation,
1388    ) -> InterpResult<'tcx, Cow<'b, Allocation<Self::Provenance, Self::AllocExtra, Self::Bytes>>>
1389    {
1390        let alloc = alloc.adjust_from_tcx(
1391            &ecx.tcx,
1392            |bytes, align| ecx.get_global_alloc_bytes(id, bytes, align),
1393            |ptr| ecx.global_root_pointer(ptr),
1394        )?;
1395        let kind = MiriMemoryKind::Global.into();
1396        let extra = MiriMachine::init_allocation(ecx, id, kind, alloc.size(), alloc.align)?;
1397        interp_ok(Cow::Owned(alloc.with_extra(extra)))
1398    }
1399
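    /// Memory-access hook: runs before every read from machine memory, informing the
    /// data-race detector (or GenMC), the weak-memory emulation, and the borrow tracker.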
1400    #[inline(always)]
1401    fn before_memory_read(
1402        _tcx: TyCtxtAt<'tcx>,
1403        machine: &Self,
1404        alloc_extra: &AllocExtra<'tcx>,
1405        ptr: Pointer,
1406        (alloc_id, prov_extra): (AllocId, Self::ProvenanceExtra),
1407        range: AllocRange,
1408    ) -> InterpResult<'tcx> {
1409        if machine.track_alloc_accesses && machine.tracked_alloc_ids.contains(&alloc_id) {
1410            machine
1411                .emit_diagnostic(NonHaltingDiagnostic::AccessedAlloc(alloc_id, AccessKind::Read));
1412        }
1413        // The order of checks is deliberate, to prefer reporting a data race over a borrow tracker error.
1414        match &machine.data_race {
1415            GlobalDataRaceHandler::None => {}
1416            GlobalDataRaceHandler::Genmc(genmc_ctx) =>
1417                genmc_ctx.memory_load(machine, ptr.addr(), range.size)?,
1418            GlobalDataRaceHandler::Vclocks(_data_race) => {
1419                let AllocDataRaceHandler::Vclocks(data_race, weak_memory) = &alloc_extra.data_race
1420                else {
1421                    unreachable!();
1422                };
1423                data_race.read(alloc_id, range, NaReadType::Read, None, machine)?;
1424                if let Some(weak_memory) = weak_memory {
1425                    weak_memory.memory_accessed(range, machine.data_race.as_vclocks_ref().unwrap());
1426                }
1427            }
1428        }
1429        if let Some(borrow_tracker) = &alloc_extra.borrow_tracker {
1430            borrow_tracker.before_memory_read(alloc_id, prov_extra, range, machine)?;
1431        }
1432        interp_ok(())
1433    }
1434
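    /// Memory-access hook for writes: the same dispatch as the read hook above, but
    /// feeding the write paths of the data-race detector and borrow tracker.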
1435    #[inline(always)]
1436    fn before_memory_write(
1437        _tcx: TyCtxtAt<'tcx>,
1438        machine: &mut Self,
1439        alloc_extra: &mut AllocExtra<'tcx>,
1440        ptr: Pointer,
1441        (alloc_id, prov_extra): (AllocId, Self::ProvenanceExtra),
1442        range: AllocRange,
1443    ) -> InterpResult<'tcx> {
1444        if machine.track_alloc_accesses && machine.tracked_alloc_ids.contains(&alloc_id) {
1445            machine
1446                .emit_diagnostic(NonHaltingDiagnostic::AccessedAlloc(alloc_id, AccessKind::Write));
1447        }
1448        match &machine.data_race {
1449            GlobalDataRaceHandler::None => {}
1450            GlobalDataRaceHandler::Genmc(genmc_ctx) => {
1451                genmc_ctx.memory_store(machine, ptr.addr(), range.size)?;
1452            }
1453            GlobalDataRaceHandler::Vclocks(_global_state) => {
1454                let AllocDataRaceHandler::Vclocks(data_race, weak_memory) =
1455                    &mut alloc_extra.data_race
1456                else {
1457                    unreachable!()
1458                };
1459                data_race.write(alloc_id, range, NaWriteType::Write, None, machine)?;
1460                if let Some(weak_memory) = weak_memory {
1461                    weak_memory.memory_accessed(range, machine.data_race.as_vclocks_ref().unwrap());
1462                }
1463            }
1464        }
1465        if let Some(borrow_tracker) = &mut alloc_extra.borrow_tracker {
1466            borrow_tracker.before_memory_write(alloc_id, prov_extra, range, machine)?;
1467        }
1468        interp_ok(())
1469    }
1470
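    /// Deallocation is modeled as a write for data-race purposes (`NaWriteType::Deallocate`),
    /// and the deallocation span is recorded for diagnostics.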
1471    #[inline(always)]
1472    fn before_memory_deallocation(
1473        _tcx: TyCtxtAt<'tcx>,
1474        machine: &mut Self,
1475        alloc_extra: &mut AllocExtra<'tcx>,
1476        ptr: Pointer,
1477        (alloc_id, prov_extra): (AllocId, Self::ProvenanceExtra),
1478        size: Size,
1479        align: Align,
1480        kind: MemoryKind,
1481    ) -> InterpResult<'tcx> {
1482        if machine.tracked_alloc_ids.contains(&alloc_id) {
1483            machine.emit_diagnostic(NonHaltingDiagnostic::FreedAlloc(alloc_id));
1484        }
1485        match &machine.data_race {
1486            GlobalDataRaceHandler::None => {}
1487            GlobalDataRaceHandler::Genmc(genmc_ctx) =>
1488                genmc_ctx.handle_dealloc(machine, ptr.addr(), size, align, kind)?,
1489            GlobalDataRaceHandler::Vclocks(_global_state) => {
1490                let data_race = alloc_extra.data_race.as_vclocks_mut().unwrap();
1491                data_race.write(
1492                    alloc_id,
1493                    alloc_range(Size::ZERO, size),
1494                    NaWriteType::Deallocate,
1495                    None,
1496                    machine,
1497                )?;
1498            }
1499        }
1500        if let Some(borrow_tracker) = &mut alloc_extra.borrow_tracker {
1501            borrow_tracker.before_memory_deallocation(alloc_id, prov_extra, size, machine)?;
1502        }
1503        if let Some((_, deallocated_at)) = machine.allocation_spans.borrow_mut().get_mut(&alloc_id)
1504        {
1505            *deallocated_at = Some(machine.current_span());
1506        }
1507        machine.free_alloc_id(alloc_id, size, align, kind);
1508        interp_ok(())
1509    }
1510
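    /// Retagging (here and in `retag_place_contents` below) only does something when a
    /// borrow tracker such as Stacked Borrows or Tree Borrows is enabled.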
1511    #[inline(always)]
1512    fn retag_ptr_value(
1513        ecx: &mut InterpCx<'tcx, Self>,
1514        kind: mir::RetagKind,
1515        val: &ImmTy<'tcx>,
1516    ) -> InterpResult<'tcx, ImmTy<'tcx>> {
1517        if ecx.machine.borrow_tracker.is_some() {
1518            ecx.retag_ptr_value(kind, val)
1519        } else {
1520            interp_ok(val.clone())
1521        }
1522    }
1523
1524    #[inline(always)]
1525    fn retag_place_contents(
1526        ecx: &mut InterpCx<'tcx, Self>,
1527        kind: mir::RetagKind,
1528        place: &PlaceTy<'tcx>,
1529    ) -> InterpResult<'tcx> {
1530        if ecx.machine.borrow_tracker.is_some() {
1531            ecx.retag_place_contents(kind, place)?;
1532        }
1533        interp_ok(())
1534    }
1535
1536    fn protect_in_place_function_argument(
1537        ecx: &mut InterpCx<'tcx, Self>,
1538        place: &MPlaceTy<'tcx>,
1539    ) -> InterpResult<'tcx> {
1540        // If we have a borrow tracker, we additionally set up protection so that all reads *and
1541        // writes* through this place during the call are immediate UB.
1542        let protected_place = if ecx.machine.borrow_tracker.is_some() {
1543            ecx.protect_place(place)?
1544        } else {
1545            // No borrow tracker.
1546            place.clone()
1547        };
1548        // We do need to write `uninit` so that even after the call ends, the former contents of
1549        // this place cannot be observed any more. We do the write after retagging so that for
1550        // Tree Borrows, this is considered to activate the new tag.
1551        // Conveniently this also ensures that the place actually points to suitable memory.
1552        ecx.write_uninit(&protected_place)?;
1553        // Now we throw away the protected place, ensuring its tag is never used again.
1554        interp_ok(())
1555    }
1556
1557    #[inline(always)]
1558    fn init_frame(
1559        ecx: &mut InterpCx<'tcx, Self>,
1560        frame: Frame<'tcx, Provenance>,
1561    ) -> InterpResult<'tcx, Frame<'tcx, Provenance, FrameExtra<'tcx>>> {
1562        // Start recording our event before doing anything else
1563        let timing = if let Some(profiler) = ecx.machine.profiler.as_ref() {
1564            let fn_name = frame.instance().to_string();
1565            let entry = ecx.machine.string_cache.entry(fn_name.clone());
1566            let name = entry.or_insert_with(|| profiler.alloc_string(&*fn_name));
1567
1568            Some(profiler.start_recording_interval_event_detached(
1569                *name,
1570                measureme::EventId::from_label(*name),
1571                ecx.active_thread().to_u32(),
1572            ))
1573        } else {
1574            None
1575        };
1576
1577        let borrow_tracker = ecx.machine.borrow_tracker.as_ref();
1578
1579        let extra = FrameExtra {
1580            borrow_tracker: borrow_tracker.map(|bt| bt.borrow_mut().new_frame()),
1581            catch_unwind: None,
1582            timing,
1583            is_user_relevant: ecx.machine.is_user_relevant(&frame),
1584            salt: ecx.machine.rng.borrow_mut().random_range(0..ADDRS_PER_ANON_GLOBAL),
1585            data_race: ecx
1586                .machine
1587                .data_race
1588                .as_vclocks_ref()
1589                .map(|_| data_race::FrameState::default()),
1590        };
1591
1592        interp_ok(frame.with_extra(extra))
1593    }
1594
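    /// Miri multiplexes virtual threads onto the interpreter, so "the" stack is always
    /// the stack of the currently active thread.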
1595    fn stack<'a>(
1596        ecx: &'a InterpCx<'tcx, Self>,
1597    ) -> &'a [Frame<'tcx, Self::Provenance, Self::FrameExtra>] {
1598        ecx.active_thread_stack()
1599    }
1600
1601    fn stack_mut<'a>(
1602        ecx: &'a mut InterpCx<'tcx, Self>,
1603    ) -> &'a mut Vec<Frame<'tcx, Self::Provenance, Self::FrameExtra>> {
1604        ecx.active_thread_stack_mut()
1605    }
1606
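    /// Runs before every basic-block terminator: counts blocks for progress reports,
    /// schedules the provenance GC, preempts threads, and advances the monotonic clock.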
1607    fn before_terminator(ecx: &mut InterpCx<'tcx, Self>) -> InterpResult<'tcx> {
1608        ecx.machine.basic_block_count += 1u64; // a u64 that is only incremented by 1 will "never" overflow
1609        ecx.machine.since_gc += 1;
1610        // Possibly report our progress. This will point at the terminator we are about to execute.
1611        if let Some(report_progress) = ecx.machine.report_progress {
1612            if ecx.machine.basic_block_count.is_multiple_of(u64::from(report_progress)) {
1613                ecx.emit_diagnostic(NonHaltingDiagnostic::ProgressReport {
1614                    block_count: ecx.machine.basic_block_count,
1615                });
1616            }
1617        }
1618
1619        // Search for BorTags to find all live pointers, then remove all other tags from borrow
1620        // stacks.
1621        // When debug assertions are enabled, run the GC as often as possible so that any cases
1622        // where it mistakenly removes an important tag become visible.
1623        if ecx.machine.gc_interval > 0 && ecx.machine.since_gc >= ecx.machine.gc_interval {
1624            ecx.machine.since_gc = 0;
1625            ecx.run_provenance_gc();
1626        }
1627
1628        // These are our preemption points.
1629        // (This will only take effect after the terminator has been executed.)
1630        ecx.maybe_preempt_active_thread();
1631
1632        // Make sure some time passes.
1633        ecx.machine.monotonic_clock.tick();
1634
1635        interp_ok(())
1636    }
1637
1638    #[inline(always)]
1639    fn after_stack_push(ecx: &mut InterpCx<'tcx, Self>) -> InterpResult<'tcx> {
1640        if ecx.frame().extra.is_user_relevant {
1641            // We just pushed a user-relevant frame, so we know that the topmost user-relevant frame
1642            // is the topmost frame. If we push a frame that is not user-relevant, there is nothing to do.
1643            let stack_len = ecx.active_thread_stack().len();
1644            ecx.active_thread_mut().set_top_user_relevant_frame(stack_len - 1);
1645        }
1646        interp_ok(())
1647    }
1648
1649    fn before_stack_pop(ecx: &mut InterpCx<'tcx, Self>) -> InterpResult<'tcx> {
1650        let frame = ecx.frame();
1651        // We want this *before* the return value copy, because the return place itself is protected
1652        // until we do `on_stack_pop` here, and we need to un-protect it to copy the return value.
1653        if ecx.machine.borrow_tracker.is_some() {
1654            ecx.on_stack_pop(frame)?;
1655        }
1656        if frame.extra.is_user_relevant {
1657            // All we store is the index of the topmost user-relevant frame, so after removing this
1658            // frame we no longer know where the next topmost user-relevant frame is. So we recompute it.
1659            // (If this ever becomes a bottleneck, we could have `push` store the previous
1660            // user-relevant frame and restore that here.)
1661            // We have to skip the frame that is just being popped.
1662            ecx.active_thread_mut().recompute_top_user_relevant_frame(/* skip */ 1);
1663        }
1664        // tracing-tree can automatically annotate scope changes, but it gets very confused by our
1665        // concurrency and what it prints is just plain wrong. So we print our own information
1666        // instead. (Cc https://github.com/rust-lang/miri/issues/2266)
1667        info!("Leaving {}", ecx.frame().instance());
1668        interp_ok(())
1669    }
1670
1671    #[inline(always)]
1672    fn after_stack_pop(
1673        ecx: &mut InterpCx<'tcx, Self>,
1674        frame: Frame<'tcx, Provenance, FrameExtra<'tcx>>,
1675        unwinding: bool,
1676    ) -> InterpResult<'tcx, ReturnAction> {
1677        let res = {
1678            // Move `frame` into a sub-scope so we control when it will be dropped.
1679            let mut frame = frame;
1680            let timing = frame.extra.timing.take();
1681            let res = ecx.handle_stack_pop_unwind(frame.extra, unwinding);
1682            if let Some(profiler) = ecx.machine.profiler.as_ref() {
1683                profiler.finish_recording_interval_event(timing.unwrap());
1684            }
1685            res
1686        };
1687        // Needs to be done after dropping frame to show up on the right nesting level.
1688        // (Cc https://github.com/rust-lang/miri/issues/2266)
1689        if !ecx.active_thread_stack().is_empty() {
1690            info!("Continuing in {}", ecx.frame().instance());
1691        }
1692        res
1693    }
1694
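    /// Locals that have not been forced into real memory are tracked by the data-race
    /// detector through these per-frame hooks rather than the memory-access hooks above.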
1695    fn after_local_read(
1696        ecx: &InterpCx<'tcx, Self>,
1697        frame: &Frame<'tcx, Provenance, FrameExtra<'tcx>>,
1698        local: mir::Local,
1699    ) -> InterpResult<'tcx> {
1700        if let Some(data_race) = &frame.extra.data_race {
1701            data_race.local_read(local, &ecx.machine);
1702        }
1703        interp_ok(())
1704    }
1705
1706    fn after_local_write(
1707        ecx: &mut InterpCx<'tcx, Self>,
1708        local: mir::Local,
1709        storage_live: bool,
1710    ) -> InterpResult<'tcx> {
1711        if let Some(data_race) = &ecx.frame().extra.data_race {
1712            data_race.local_write(local, storage_live, &ecx.machine);
1713        }
1714        interp_ok(())
1715    }
1716
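    /// Once a local is forced into real memory (e.g. because its address was taken),
    /// record where it was declared and transfer its data-race clocks onto the new allocation.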
1717    fn after_local_moved_to_memory(
1718        ecx: &mut InterpCx<'tcx, Self>,
1719        local: mir::Local,
1720        mplace: &MPlaceTy<'tcx>,
1721    ) -> InterpResult<'tcx> {
1722        let Some(Provenance::Concrete { alloc_id, .. }) = mplace.ptr().provenance else {
1723            panic!("after_local_allocated should only be called on fresh allocations");
1724        };
1725        // Record the span where this was allocated: the declaration of the local.
1726        let local_decl = &ecx.frame().body().local_decls[local];
1727        let span = local_decl.source_info.span;
1728        ecx.machine.allocation_spans.borrow_mut().insert(alloc_id, (span, None));
1729        // The data race system has to fix the clocks used for this write.
1730        let (alloc_info, machine) = ecx.get_alloc_extra_mut(alloc_id)?;
1731        if let Some(data_race) =
1732            &machine.threads.active_thread_stack().last().unwrap().extra.data_race
1733        {
1734            data_race.local_moved_to_memory(
1735                local,
1736                alloc_info.data_race.as_vclocks_mut().unwrap(),
1737                machine,
1738            );
1739        }
1740        interp_ok(())
1741    }
1742
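    /// Const-eval results are cached per (const, frame salt) pair; see `FrameExtra::salt`
    /// for why the key includes the per-frame salt.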
1743    fn eval_mir_constant<F>(
1744        ecx: &InterpCx<'tcx, Self>,
1745        val: mir::Const<'tcx>,
1746        span: Span,
1747        layout: Option<TyAndLayout<'tcx>>,
1748        eval: F,
1749    ) -> InterpResult<'tcx, OpTy<'tcx>>
1750    where
1751        F: Fn(
1752            &InterpCx<'tcx, Self>,
1753            mir::Const<'tcx>,
1754            Span,
1755            Option<TyAndLayout<'tcx>>,
1756        ) -> InterpResult<'tcx, OpTy<'tcx>>,
1757    {
1758        let frame = ecx.active_thread_stack().last().unwrap();
1759        let mut cache = ecx.machine.const_cache.borrow_mut();
1760        match cache.entry((val, frame.extra.salt)) {
1761            Entry::Vacant(ve) => {
1762                let op = eval(ecx, val, span, layout)?;
1763                ve.insert(op.clone());
1764                interp_ok(op)
1765            }
1766            Entry::Occupied(oe) => interp_ok(oe.get().clone()),
1767        }
1768    }
1769
1770    fn get_global_alloc_salt(
1771        ecx: &InterpCx<'tcx, Self>,
1772        instance: Option<ty::Instance<'tcx>>,
1773    ) -> usize {
1774        let unique = if let Some(instance) = instance {
1775            // Functions cannot be identified by pointers, as asm-equal functions can get
1776            // deduplicated by the linker (we set the "unnamed_addr" attribute for LLVM) and
1777            // functions can be duplicated across crates. We thus generate a new `AllocId` for every
1778            // mention of a function. This means that `main as fn() == main as fn()` is false, while
1779            // `let x = main as fn(); x == x` is true. However, as a quality-of-life feature it can
1780            // be useful to identify certain functions uniquely, e.g. for backtraces. So we identify
1781            // whether codegen will actually emit duplicate functions. It does that when they have
1782            // non-lifetime generics, or when they can be inlined. All other functions are given a
1783            // unique address. This is not a stable guarantee! The `inline` attribute is a hint and
1784            // cannot be relied upon for anything. But if we don't do this, the
1785            // `__rust_begin_short_backtrace`/`__rust_end_short_backtrace` logic breaks and panic
1786            // backtraces look terrible.
1787            let is_generic = instance
1788                .args
1789                .into_iter()
1790                .any(|arg| !matches!(arg.kind(), ty::GenericArgKind::Lifetime(_)));
1791            let can_be_inlined = matches!(
1792                ecx.tcx.sess.opts.unstable_opts.cross_crate_inline_threshold,
1793                InliningThreshold::Always
1794            ) || !matches!(
1795                ecx.tcx.codegen_fn_attrs(instance.def_id()).inline,
1796                InlineAttr::Never
1797            );
1798            !is_generic && !can_be_inlined
1799        } else {
1800            // Non-functions are never unique.
1801            false
1802        };
1803        // Always use the same salt if the allocation is unique.
1804        if unique {
1805            CTFE_ALLOC_SALT
1806        } else {
1807            ecx.machine.rng.borrow_mut().random_range(0..ADDRS_PER_ANON_GLOBAL)
1808        }
1809    }
1810
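    /// Computing which bytes of a union carry data can be costly, so the result is
    /// cached per type for the lifetime of the machine.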
1811    fn cached_union_data_range<'e>(
1812        ecx: &'e mut InterpCx<'tcx, Self>,
1813        ty: Ty<'tcx>,
1814        compute_range: impl FnOnce() -> RangeSet,
1815    ) -> Cow<'e, RangeSet> {
1816        Cow::Borrowed(ecx.machine.union_data_ranges.entry(ty).or_insert_with(compute_range))
1817    }
1818
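    /// On Linux, program allocations can be served from Miri's isolated allocator when
    /// one is configured; everywhere else the global allocator is used.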
1819    fn get_default_alloc_params(&self) -> <Self::Bytes as AllocBytes>::AllocParams {
1820        use crate::alloc::MiriAllocParams;
1821
1822        #[cfg(target_os = "linux")]
1823        match &self.allocator {
1824            Some(alloc) => MiriAllocParams::Isolated(alloc.clone()),
1825            None => MiriAllocParams::Global,
1826        }
1827        #[cfg(not(target_os = "linux"))]
1828        MiriAllocParams::Global
1829    }
1830}
1831
1832/// Trait for callbacks handling asynchronous machine operations.
1833pub trait MachineCallback<'tcx, T>: VisitProvenance {
1834    /// The function to be invoked when the callback is fired.
1835    fn call(
1836        self: Box<Self>,
1837        ecx: &mut InterpCx<'tcx, MiriMachine<'tcx>>,
1838        arg: T,
1839    ) -> InterpResult<'tcx>;
1840}
1841
1842/// Type alias for boxed machine callbacks with generic argument type.
1843pub type DynMachineCallback<'tcx, T> = Box<dyn MachineCallback<'tcx, T> + 'tcx>;
1844
1845/// Creates a `DynMachineCallback`:
1846///
1847/// ```rust
1848/// callback!(
1849///     @capture<'tcx> {
1850///         var1: Ty1,
1851///         var2: Ty2<'tcx>,
1852///     }
1853///     |this, arg: ArgTy| {
1854///         // Implement the callback here.
1855///         todo!()
1856///     }
1857/// )
1858/// ```
1859///
1860/// All the argument types must implement `VisitProvenance`.
1861#[macro_export]
1862macro_rules! callback {
1863    (@capture<$tcx:lifetime $(,)? $($lft:lifetime),*>
1864        { $($name:ident: $type:ty),* $(,)? }
1865     |$this:ident, $arg:ident: $arg_ty:ty| $body:expr $(,)?) => {{
1866        struct Callback<$tcx, $($lft),*> {
1867            $($name: $type,)*
1868            _phantom: std::marker::PhantomData<&$tcx ()>,
1869        }
1870
1871        impl<$tcx, $($lft),*> VisitProvenance for Callback<$tcx, $($lft),*> {
1872            fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {
1873                $(
1874                    self.$name.visit_provenance(_visit);
1875                )*
1876            }
1877        }
1878
1879        impl<$tcx, $($lft),*> MachineCallback<$tcx, $arg_ty> for Callback<$tcx, $($lft),*> {
1880            fn call(
1881                self: Box<Self>,
1882                $this: &mut MiriInterpCx<$tcx>,
1883                $arg: $arg_ty
1884            ) -> InterpResult<$tcx> {
1885                #[allow(unused_variables)]
1886                let Callback { $($name,)* _phantom } = *self;
1887                $body
1888            }
1889        }
1890
1891        Box::new(Callback {
1892            $($name,)*
1893            _phantom: std::marker::PhantomData
1894        })
1895    }};
1896}