miri/concurrency/
data_race.rs

1//! Implementation of a data-race detector using Lamport Timestamps / Vector clocks
2//! based on the Dynamic Race Detection for C++:
3//! <https://www.doc.ic.ac.uk/~afd/homepages/papers/pdfs/2017/POPL.pdf>
4//! which does not report false-positives when fences are used, and gives better
5//! accuracy in presence of read-modify-write operations.
6//!
7//! The implementation contains modifications to correctly model the changes to the memory model in C++20
8//! regarding the weakening of release sequences: <http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p0982r1.html>.
9//! Relaxed stores now unconditionally block all currently active release sequences and so per-thread tracking of release
10//! sequences is not needed.
11//!
12//! The implementation also models races with memory allocation and deallocation via treating allocation and
13//! deallocation as a type of write internally for detecting data-races.
14//!
15//! Weak memory orders are explored but not all weak behaviours are exhibited, so it can still miss data-races
16//! but should not report false-positives
17//!
18//! Data-race definition from(<https://en.cppreference.com/w/cpp/language/memory_model#Threads_and_data_races>):
19//! a data race occurs between two memory accesses if they are on different threads, at least one operation
20//! is non-atomic, at least one operation is a write and neither access happens-before the other. Read the link
21//! for full definition.
22//!
23//! This re-uses vector indexes for threads that are known to be unable to report data-races, this is valid
24//! because it only re-uses vector indexes once all currently-active (not-terminated) threads have an internal
25//! vector clock that happens-after the join operation of the candidate thread. Threads that have not been joined
26//! on are not considered. Since the thread's vector clock will only increase and a data-race implies that
27//! there is some index x where `clock[x] > thread_clock`, when this is true `clock[candidate-idx] > thread_clock`
28//! can never hold and hence a data-race can never be reported in that vector index again.
29//! This means that the thread-index can be safely re-used, starting on the next timestamp for the newly created
30//! thread.
31//!
32//! The timestamps used in the data-race detector assign each sequence of non-atomic operations
33//! followed by a single atomic or concurrent operation a single timestamp.
34//! Write, Read, Write, ThreadJoin will be represented by a single timestamp value on a thread.
35//! This is because extra increment operations between the operations in the sequence are not
36//! required for accurate reporting of data-race values.
37//!
38//! As per the paper a threads timestamp is only incremented after a release operation is performed
39//! so some atomic operations that only perform acquires do not increment the timestamp. Due to shared
40//! code some atomic operations may increment the timestamp when not necessary but this has no effect
41//! on the data-race detection code.
42
43use std::cell::{Cell, Ref, RefCell, RefMut};
44use std::fmt::Debug;
45use std::mem;
46
47use rustc_abi::{Align, HasDataLayout, Size};
48use rustc_ast::Mutability;
49use rustc_data_structures::fx::{FxHashMap, FxHashSet};
50use rustc_index::{Idx, IndexVec};
51use rustc_log::tracing;
52use rustc_middle::mir;
53use rustc_middle::ty::Ty;
54use rustc_span::Span;
55
56use super::vector_clock::{VClock, VTimestamp, VectorIdx};
57use super::weak_memory::EvalContextExt as _;
58use crate::concurrency::GlobalDataRaceHandler;
59use crate::diagnostics::RacingOp;
60use crate::intrinsics::AtomicRmwOp;
61use crate::*;
62
63pub type AllocState = VClockAlloc;
64
65/// Valid atomic read-write orderings, alias of atomic::Ordering (not non-exhaustive).
66#[derive(Copy, Clone, PartialEq, Eq, Debug)]
67pub enum AtomicRwOrd {
68    Relaxed,
69    Acquire,
70    Release,
71    AcqRel,
72    SeqCst,
73}
74
75/// Valid atomic read orderings, subset of atomic::Ordering.
76#[derive(Copy, Clone, PartialEq, Eq, Debug)]
77pub enum AtomicReadOrd {
78    Relaxed,
79    Acquire,
80    SeqCst,
81}
82
83/// Valid atomic write orderings, subset of atomic::Ordering.
84#[derive(Copy, Clone, PartialEq, Eq, Debug)]
85pub enum AtomicWriteOrd {
86    Relaxed,
87    Release,
88    SeqCst,
89}
90
91/// Valid atomic fence orderings, subset of atomic::Ordering.
92#[derive(Copy, Clone, PartialEq, Eq, Debug)]
93pub enum AtomicFenceOrd {
94    Acquire,
95    Release,
96    AcqRel,
97    SeqCst,
98}
99
100/// The current set of vector clocks describing the state
101/// of a thread, contains the happens-before clock and
102/// additional metadata to model atomic fence operations.
103#[derive(Clone, Default, Debug)]
104pub(super) struct ThreadClockSet {
105    /// The increasing clock representing timestamps
106    /// that happen-before this thread.
107    pub(super) clock: VClock,
108
109    /// The set of timestamps that will happen-before this
110    /// thread once it performs an acquire fence.
111    fence_acquire: VClock,
112
113    /// The last timestamp of happens-before relations that
114    /// have been released by this thread by a release fence.
115    fence_release: VClock,
116
117    /// Timestamps of the last SC write performed by each
118    /// thread, updated when this thread performs an SC fence.
119    /// This is never acquired into the thread's clock, it
120    /// just limits which old writes can be seen in weak memory emulation.
121    pub(super) write_seqcst: VClock,
122
123    /// Timestamps of the last SC fence performed by each
124    /// thread, updated when this thread performs an SC read.
125    /// This is never acquired into the thread's clock, it
126    /// just limits which old writes can be seen in weak memory emulation.
127    pub(super) read_seqcst: VClock,
128}
129
130impl ThreadClockSet {
131    /// Apply the effects of a release fence to this
132    /// set of thread vector clocks.
133    #[inline]
134    fn apply_release_fence(&mut self) {
135        self.fence_release.clone_from(&self.clock);
136    }
137
138    /// Apply the effects of an acquire fence to this
139    /// set of thread vector clocks.
140    #[inline]
141    fn apply_acquire_fence(&mut self) {
142        self.clock.join(&self.fence_acquire);
143    }
144
145    /// Increment the happens-before clock at a
146    /// known index.
147    #[inline]
148    fn increment_clock(&mut self, index: VectorIdx, current_span: Span) {
149        self.clock.increment_index(index, current_span);
150    }
151
152    /// Join the happens-before clock with that of
153    /// another thread, used to model thread join
154    /// operations.
155    fn join_with(&mut self, other: &ThreadClockSet) {
156        self.clock.join(&other.clock);
157    }
158}
159
160/// Error returned by finding a data race
161/// should be elaborated upon.
162#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
163pub struct DataRace;
164
165/// Externally stored memory cell clocks
166/// explicitly to reduce memory usage for the
167/// common case where no atomic operations
168/// exists on the memory cell.
169#[derive(Clone, PartialEq, Eq, Debug)]
170struct AtomicMemoryCellClocks {
171    /// The clock-vector of the timestamp of the last atomic
172    /// read operation performed by each thread.
173    /// This detects potential data-races between atomic read
174    /// and non-atomic write operations.
175    read_vector: VClock,
176
177    /// The clock-vector of the timestamp of the last atomic
178    /// write operation performed by each thread.
179    /// This detects potential data-races between atomic write
180    /// and non-atomic read or write operations.
181    write_vector: VClock,
182
183    /// Synchronization vector for acquire-release semantics
184    /// contains the vector of timestamps that will
185    /// happen-before a thread if an acquire-load is
186    /// performed on the data.
187    ///
188    /// With weak memory emulation, this is the clock of the most recent write. It is then only used
189    /// for release sequences, to integrate the most recent clock into the next one for RMWs.
190    sync_vector: VClock,
191
192    /// The size of accesses to this atomic location.
193    /// We use this to detect non-synchronized mixed-size accesses. Since all accesses must be
194    /// aligned to their size, this is sufficient to detect imperfectly overlapping accesses.
195    /// `None` indicates that we saw multiple different sizes, which is okay as long as all accesses are reads.
196    size: Option<Size>,
197}
198
199#[derive(Copy, Clone, PartialEq, Eq, Debug)]
200enum AtomicAccessType {
201    Load(AtomicReadOrd),
202    Store,
203    Rmw,
204}
205
206/// Type of a non-atomic read operation.
207#[derive(Copy, Clone, PartialEq, Eq, Debug)]
208pub enum NaReadType {
209    /// Standard unsynchronized write.
210    Read,
211
212    // An implicit read generated by a retag.
213    Retag,
214}
215
216impl NaReadType {
217    fn description(self) -> &'static str {
218        match self {
219            NaReadType::Read => "non-atomic read",
220            NaReadType::Retag => "retag read",
221        }
222    }
223}
224
225/// Type of a non-atomic write operation: allocating memory, non-atomic writes, and
226/// deallocating memory are all treated as writes for the purpose of the data-race detector.
227#[derive(Copy, Clone, PartialEq, Eq, Debug)]
228pub enum NaWriteType {
229    /// Allocate memory.
230    Allocate,
231
232    /// Standard unsynchronized write.
233    Write,
234
235    // An implicit write generated by a retag.
236    Retag,
237
238    /// Deallocate memory.
239    /// Note that when memory is deallocated first, later non-atomic accesses
240    /// will be reported as use-after-free, not as data races.
241    /// (Same for `Allocate` above.)
242    Deallocate,
243}
244
245impl NaWriteType {
246    fn description(self) -> &'static str {
247        match self {
248            NaWriteType::Allocate => "creating a new allocation",
249            NaWriteType::Write => "non-atomic write",
250            NaWriteType::Retag => "retag write",
251            NaWriteType::Deallocate => "deallocation",
252        }
253    }
254}
255
256#[derive(Copy, Clone, PartialEq, Eq, Debug)]
257enum AccessType {
258    NaRead(NaReadType),
259    NaWrite(NaWriteType),
260    AtomicLoad,
261    AtomicStore,
262    AtomicRmw,
263}
264
265/// Per-byte vector clock metadata for data-race detection.
266#[derive(Clone, PartialEq, Eq, Debug)]
267struct MemoryCellClocks {
268    /// The vector clock timestamp and the thread that did the last non-atomic write. We don't need
269    /// a full `VClock` here, it's always a single thread and nothing synchronizes, so the effective
270    /// clock is all-0 except for the thread that did the write.
271    write: (VectorIdx, VTimestamp),
272
273    /// The type of operation that the write index represents,
274    /// either newly allocated memory, a non-atomic write or
275    /// a deallocation of memory.
276    write_type: NaWriteType,
277
278    /// The vector clock of all non-atomic reads that happened since the last non-atomic write
279    /// (i.e., we join together the "singleton" clocks corresponding to each read). It is reset to
280    /// zero on each write operation.
281    read: VClock,
282
283    /// Atomic access tracking clocks.
284    /// For non-atomic memory this value is set to None.
285    /// For atomic memory, each byte carries this information.
286    atomic_ops: Option<Box<AtomicMemoryCellClocks>>,
287}
288
289/// Extra metadata associated with a thread.
290#[derive(Debug, Clone, Default)]
291struct ThreadExtraState {
292    /// The current vector index in use by the
293    /// thread currently, this is set to None
294    /// after the vector index has been re-used
295    /// and hence the value will never need to be
296    /// read during data-race reporting.
297    vector_index: Option<VectorIdx>,
298
299    /// Thread termination vector clock, this
300    /// is set on thread termination and is used
301    /// for joining on threads since the vector_index
302    /// may be re-used when the join operation occurs.
303    termination_vector_clock: Option<VClock>,
304}
305
306/// Global data-race detection state, contains the currently
307/// executing thread as well as the vector clocks associated
308/// with each of the threads.
309// FIXME: it is probably better to have one large RefCell, than to have so many small ones.
310#[derive(Debug, Clone)]
311pub struct GlobalState {
312    /// Set to true once the first additional
313    /// thread has launched, due to the dependency
314    /// between before and after a thread launch.
315    /// Any data-races must be recorded after this
316    /// so concurrent execution can ignore recording
317    /// any data-races.
318    multi_threaded: Cell<bool>,
319
320    /// A flag to mark we are currently performing
321    /// a data race free action (such as atomic access)
322    /// to suppress the race detector
323    ongoing_action_data_race_free: Cell<bool>,
324
325    /// Mapping of a vector index to a known set of thread
326    /// clocks, this is not directly mapping from a thread id
327    /// since it may refer to multiple threads.
328    vector_clocks: RefCell<IndexVec<VectorIdx, ThreadClockSet>>,
329
330    /// Mapping of a given vector index to the current thread
331    /// that the execution is representing, this may change
332    /// if a vector index is re-assigned to a new thread.
333    vector_info: RefCell<IndexVec<VectorIdx, ThreadId>>,
334
335    /// The mapping of a given thread to associated thread metadata.
336    thread_info: RefCell<IndexVec<ThreadId, ThreadExtraState>>,
337
338    /// Potential vector indices that could be re-used on thread creation
339    /// values are inserted here on after the thread has terminated and
340    /// been joined with, and hence may potentially become free
341    /// for use as the index for a new thread.
342    /// Elements in this set may still require the vector index to
343    /// report data-races, and can only be re-used after all
344    /// active vector clocks catch up with the threads timestamp.
345    reuse_candidates: RefCell<FxHashSet<VectorIdx>>,
346
347    /// We make SC fences act like RMWs on a global location.
348    /// To implement that, they all release and acquire into this clock.
349    last_sc_fence: RefCell<VClock>,
350
351    /// The timestamp of last SC write performed by each thread.
352    /// Threads only update their own index here!
353    last_sc_write_per_thread: RefCell<VClock>,
354
355    /// Track when an outdated (weak memory) load happens.
356    pub track_outdated_loads: bool,
357
358    /// Whether weak memory emulation is enabled
359    pub weak_memory: bool,
360}
361
362impl VisitProvenance for GlobalState {
363    fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {
364        // We don't have any tags.
365    }
366}
367
368impl AccessType {
369    fn description(self, ty: Option<Ty<'_>>, size: Option<Size>) -> String {
370        let mut msg = String::new();
371
372        if let Some(size) = size {
373            if size == Size::ZERO {
374                // In this case there were multiple read accesss with different sizes and then a write.
375                // We will be reporting *one* of the other reads, but we don't have enough information
376                // to determine which one had which size.
377                assert!(self == AccessType::AtomicLoad);
378                assert!(ty.is_none());
379                return format!("multiple differently-sized atomic loads, including one load");
380            }
381            msg.push_str(&format!("{}-byte {}", size.bytes(), msg))
382        }
383
384        msg.push_str(match self {
385            AccessType::NaRead(w) => w.description(),
386            AccessType::NaWrite(w) => w.description(),
387            AccessType::AtomicLoad => "atomic load",
388            AccessType::AtomicStore => "atomic store",
389            AccessType::AtomicRmw => "atomic read-modify-write",
390        });
391
392        if let Some(ty) = ty {
393            msg.push_str(&format!(" of type `{ty}`"));
394        }
395
396        msg
397    }
398
399    fn is_atomic(self) -> bool {
400        match self {
401            AccessType::AtomicLoad | AccessType::AtomicStore | AccessType::AtomicRmw => true,
402            AccessType::NaRead(_) | AccessType::NaWrite(_) => false,
403        }
404    }
405
406    fn is_read(self) -> bool {
407        match self {
408            AccessType::AtomicLoad | AccessType::NaRead(_) => true,
409            AccessType::NaWrite(_) | AccessType::AtomicStore | AccessType::AtomicRmw => false,
410        }
411    }
412
413    fn is_retag(self) -> bool {
414        matches!(
415            self,
416            AccessType::NaRead(NaReadType::Retag) | AccessType::NaWrite(NaWriteType::Retag)
417        )
418    }
419}
420
421impl AtomicMemoryCellClocks {
422    fn new(size: Size) -> Self {
423        AtomicMemoryCellClocks {
424            read_vector: Default::default(),
425            write_vector: Default::default(),
426            sync_vector: Default::default(),
427            size: Some(size),
428        }
429    }
430}
431
432impl MemoryCellClocks {
433    /// Create a new set of clocks representing memory allocated
434    ///  at a given vector timestamp and index.
435    fn new(alloc: VTimestamp, alloc_index: VectorIdx) -> Self {
436        MemoryCellClocks {
437            read: VClock::default(),
438            write: (alloc_index, alloc),
439            write_type: NaWriteType::Allocate,
440            atomic_ops: None,
441        }
442    }
443
444    #[inline]
445    fn write_was_before(&self, other: &VClock) -> bool {
446        // This is the same as `self.write() <= other` but
447        // without actually manifesting a clock for `self.write`.
448        self.write.1 <= other[self.write.0]
449    }
450
451    #[inline]
452    fn write(&self) -> VClock {
453        VClock::new_with_index(self.write.0, self.write.1)
454    }
455
456    /// Load the internal atomic memory cells if they exist.
457    #[inline]
458    fn atomic(&self) -> Option<&AtomicMemoryCellClocks> {
459        self.atomic_ops.as_deref()
460    }
461
462    /// Load the internal atomic memory cells if they exist.
463    #[inline]
464    fn atomic_mut_unwrap(&mut self) -> &mut AtomicMemoryCellClocks {
465        self.atomic_ops.as_deref_mut().unwrap()
466    }
467
468    /// Load or create the internal atomic memory metadata if it does not exist. Also ensures we do
469    /// not do mixed-size atomic accesses, and updates the recorded atomic access size.
470    fn atomic_access(
471        &mut self,
472        thread_clocks: &ThreadClockSet,
473        size: Size,
474        write: bool,
475    ) -> Result<&mut AtomicMemoryCellClocks, DataRace> {
476        match self.atomic_ops {
477            Some(ref mut atomic) => {
478                // We are good if the size is the same or all atomic accesses are before our current time.
479                if atomic.size == Some(size) {
480                    Ok(atomic)
481                } else if atomic.read_vector <= thread_clocks.clock
482                    && atomic.write_vector <= thread_clocks.clock
483                {
484                    // We are fully ordered after all previous accesses, so we can change the size.
485                    atomic.size = Some(size);
486                    Ok(atomic)
487                } else if !write && atomic.write_vector <= thread_clocks.clock {
488                    // This is a read, and it is ordered after the last write. It's okay for the
489                    // sizes to mismatch, as long as no writes with a different size occur later.
490                    atomic.size = None;
491                    Ok(atomic)
492                } else {
493                    Err(DataRace)
494                }
495            }
496            None => {
497                self.atomic_ops = Some(Box::new(AtomicMemoryCellClocks::new(size)));
498                Ok(self.atomic_ops.as_mut().unwrap())
499            }
500        }
501    }
502
503    /// Update memory cell data-race tracking for atomic
504    /// load acquire semantics, is a no-op if this memory was
505    /// not used previously as atomic memory.
506    fn load_acquire(
507        &mut self,
508        thread_clocks: &mut ThreadClockSet,
509        index: VectorIdx,
510        access_size: Size,
511        sync_clock: Option<&VClock>,
512    ) -> Result<(), DataRace> {
513        self.atomic_read_detect(thread_clocks, index, access_size)?;
514        if let Some(sync_clock) = sync_clock.or_else(|| self.atomic().map(|a| &a.sync_vector)) {
515            thread_clocks.clock.join(sync_clock);
516        }
517        Ok(())
518    }
519
520    /// Update memory cell data-race tracking for atomic
521    /// load relaxed semantics, is a no-op if this memory was
522    /// not used previously as atomic memory.
523    fn load_relaxed(
524        &mut self,
525        thread_clocks: &mut ThreadClockSet,
526        index: VectorIdx,
527        access_size: Size,
528        sync_clock: Option<&VClock>,
529    ) -> Result<(), DataRace> {
530        self.atomic_read_detect(thread_clocks, index, access_size)?;
531        if let Some(sync_clock) = sync_clock.or_else(|| self.atomic().map(|a| &a.sync_vector)) {
532            thread_clocks.fence_acquire.join(sync_clock);
533        }
534        Ok(())
535    }
536
537    /// Update the memory cell data-race tracking for atomic
538    /// store release semantics.
539    fn store_release(
540        &mut self,
541        thread_clocks: &ThreadClockSet,
542        index: VectorIdx,
543        access_size: Size,
544    ) -> Result<(), DataRace> {
545        self.atomic_write_detect(thread_clocks, index, access_size)?;
546        let atomic = self.atomic_mut_unwrap(); // initialized by `atomic_write_detect`
547        atomic.sync_vector.clone_from(&thread_clocks.clock);
548        Ok(())
549    }
550
551    /// Update the memory cell data-race tracking for atomic
552    /// store relaxed semantics.
553    fn store_relaxed(
554        &mut self,
555        thread_clocks: &ThreadClockSet,
556        index: VectorIdx,
557        access_size: Size,
558    ) -> Result<(), DataRace> {
559        self.atomic_write_detect(thread_clocks, index, access_size)?;
560
561        // The handling of release sequences was changed in C++20 and so
562        // the code here is different to the paper since now all relaxed
563        // stores block release sequences. The exception for same-thread
564        // relaxed stores has been removed. We always overwrite the `sync_vector`,
565        // meaning the previous release sequence is broken.
566        let atomic = self.atomic_mut_unwrap();
567        atomic.sync_vector.clone_from(&thread_clocks.fence_release);
568        Ok(())
569    }
570
571    /// Update the memory cell data-race tracking for atomic
572    /// store release semantics for RMW operations.
573    fn rmw_release(
574        &mut self,
575        thread_clocks: &ThreadClockSet,
576        index: VectorIdx,
577        access_size: Size,
578    ) -> Result<(), DataRace> {
579        self.atomic_write_detect(thread_clocks, index, access_size)?;
580        let atomic = self.atomic_mut_unwrap();
581        // This *joining* of `sync_vector` implements release sequences: future
582        // reads of this location will acquire our clock *and* what was here before.
583        atomic.sync_vector.join(&thread_clocks.clock);
584        Ok(())
585    }
586
587    /// Update the memory cell data-race tracking for atomic
588    /// store relaxed semantics for RMW operations.
589    fn rmw_relaxed(
590        &mut self,
591        thread_clocks: &ThreadClockSet,
592        index: VectorIdx,
593        access_size: Size,
594    ) -> Result<(), DataRace> {
595        self.atomic_write_detect(thread_clocks, index, access_size)?;
596        let atomic = self.atomic_mut_unwrap();
597        // This *joining* of `sync_vector` implements release sequences: future
598        // reads of this location will acquire our fence clock *and* what was here before.
599        atomic.sync_vector.join(&thread_clocks.fence_release);
600        Ok(())
601    }
602
603    /// Detect data-races with an atomic read, caused by a non-atomic write that does
604    /// not happen-before the atomic-read.
605    fn atomic_read_detect(
606        &mut self,
607        thread_clocks: &ThreadClockSet,
608        index: VectorIdx,
609        access_size: Size,
610    ) -> Result<(), DataRace> {
611        trace!("Atomic read with vectors: {:#?} :: {:#?}", self, thread_clocks);
612        let atomic = self.atomic_access(thread_clocks, access_size, /*write*/ false)?;
613        atomic.read_vector.set_at_index(&thread_clocks.clock, index);
614        // Make sure the last non-atomic write was before this access.
615        if self.write_was_before(&thread_clocks.clock) { Ok(()) } else { Err(DataRace) }
616    }
617
618    /// Detect data-races with an atomic write, either with a non-atomic read or with
619    /// a non-atomic write.
620    fn atomic_write_detect(
621        &mut self,
622        thread_clocks: &ThreadClockSet,
623        index: VectorIdx,
624        access_size: Size,
625    ) -> Result<(), DataRace> {
626        trace!("Atomic write with vectors: {:#?} :: {:#?}", self, thread_clocks);
627        let atomic = self.atomic_access(thread_clocks, access_size, /*write*/ true)?;
628        atomic.write_vector.set_at_index(&thread_clocks.clock, index);
629        // Make sure the last non-atomic write and all non-atomic reads were before this access.
630        if self.write_was_before(&thread_clocks.clock) && self.read <= thread_clocks.clock {
631            Ok(())
632        } else {
633            Err(DataRace)
634        }
635    }
636
637    /// Detect races for non-atomic read operations at the current memory cell
638    /// returns true if a data-race is detected.
639    fn read_race_detect(
640        &mut self,
641        thread_clocks: &mut ThreadClockSet,
642        index: VectorIdx,
643        read_type: NaReadType,
644        current_span: Span,
645    ) -> Result<(), DataRace> {
646        trace!("Unsynchronized read with vectors: {:#?} :: {:#?}", self, thread_clocks);
647        if !current_span.is_dummy() {
648            thread_clocks.clock.index_mut(index).span = current_span;
649        }
650        thread_clocks.clock.index_mut(index).set_read_type(read_type);
651        // Check synchronization with non-atomic writes.
652        if !self.write_was_before(&thread_clocks.clock) {
653            return Err(DataRace);
654        }
655        // Check synchronization with atomic writes.
656        if !self.atomic().is_none_or(|atomic| atomic.write_vector <= thread_clocks.clock) {
657            return Err(DataRace);
658        }
659        // Record this access.
660        self.read.set_at_index(&thread_clocks.clock, index);
661        Ok(())
662    }
663
664    /// Detect races for non-atomic write operations at the current memory cell
665    /// returns true if a data-race is detected.
666    fn write_race_detect(
667        &mut self,
668        thread_clocks: &mut ThreadClockSet,
669        index: VectorIdx,
670        write_type: NaWriteType,
671        current_span: Span,
672    ) -> Result<(), DataRace> {
673        trace!("Unsynchronized write with vectors: {:#?} :: {:#?}", self, thread_clocks);
674        if !current_span.is_dummy() {
675            thread_clocks.clock.index_mut(index).span = current_span;
676        }
677        // Check synchronization with non-atomic accesses.
678        if !(self.write_was_before(&thread_clocks.clock) && self.read <= thread_clocks.clock) {
679            return Err(DataRace);
680        }
681        // Check synchronization with atomic accesses.
682        if !self.atomic().is_none_or(|atomic| {
683            atomic.write_vector <= thread_clocks.clock && atomic.read_vector <= thread_clocks.clock
684        }) {
685            return Err(DataRace);
686        }
687        // Record this access.
688        self.write = (index, thread_clocks.clock[index]);
689        self.write_type = write_type;
690        self.read.set_zero_vector();
691        // This is not an atomic location any more.
692        self.atomic_ops = None;
693        Ok(())
694    }
695}
696
697impl GlobalDataRaceHandler {
698    /// Select whether data race checking is disabled. This is solely an
699    /// implementation detail of `allow_data_races_*` and must not be used anywhere else!
700    fn set_ongoing_action_data_race_free(&self, enable: bool) {
701        match self {
702            GlobalDataRaceHandler::None => {}
703            GlobalDataRaceHandler::Vclocks(data_race) => {
704                let old = data_race.ongoing_action_data_race_free.replace(enable);
705                assert_ne!(old, enable, "cannot nest allow_data_races");
706            }
707            GlobalDataRaceHandler::Genmc(genmc_ctx) => {
708                genmc_ctx.set_ongoing_action_data_race_free(enable);
709            }
710        }
711    }
712}
713
714/// Evaluation context extensions.
715impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx> {}
716pub trait EvalContextExt<'tcx>: MiriInterpCxExt<'tcx> {
717    /// Perform an atomic read operation at the memory location.
718    fn read_scalar_atomic(
719        &self,
720        place: &MPlaceTy<'tcx>,
721        atomic: AtomicReadOrd,
722    ) -> InterpResult<'tcx, Scalar> {
723        let this = self.eval_context_ref();
724        this.atomic_access_check(place, AtomicAccessType::Load(atomic))?;
725        // This will read from the last store in the modification order of this location. In case
726        // weak memory emulation is enabled, this may not be the store we will pick to actually read from and return.
727        // This is fine with StackedBorrow and race checks because they don't concern metadata on
728        // the *value* (including the associated provenance if this is an AtomicPtr) at this location.
729        // Only metadata on the location itself is used.
730
731        if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
732            let old_val = this.run_for_validation_ref(|this| this.read_scalar(place)).discard_err();
733            return genmc_ctx.atomic_load(
734                this,
735                place.ptr().addr(),
736                place.layout.size,
737                atomic,
738                old_val,
739            );
740        }
741
742        let scalar = this.allow_data_races_ref(move |this| this.read_scalar(place))?;
743        let buffered_scalar = this.buffered_atomic_read(place, atomic, scalar, |sync_clock| {
744            this.validate_atomic_load(place, atomic, sync_clock)
745        })?;
746        interp_ok(buffered_scalar.ok_or_else(|| err_ub!(InvalidUninitBytes(None)))?)
747    }
748
749    /// Perform an atomic write operation at the memory location.
750    fn write_scalar_atomic(
751        &mut self,
752        val: Scalar,
753        dest: &MPlaceTy<'tcx>,
754        atomic: AtomicWriteOrd,
755    ) -> InterpResult<'tcx> {
756        let this = self.eval_context_mut();
757        this.atomic_access_check(dest, AtomicAccessType::Store)?;
758
759        // Inform GenMC about the atomic store.
760        if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
761            let old_val = this.run_for_validation_ref(|this| this.read_scalar(dest)).discard_err();
762            if genmc_ctx.atomic_store(
763                this,
764                dest.ptr().addr(),
765                dest.layout.size,
766                val,
767                old_val,
768                atomic,
769            )? {
770                // The store might be the latest store in coherence order (determined by GenMC).
771                // If it is, we need to update the value in Miri's memory:
772                this.allow_data_races_mut(|this| this.write_scalar(val, dest))?;
773            }
774            return interp_ok(());
775        }
776
777        // Read the previous value so we can put it in the store buffer later.
778        let old_val = this.get_latest_nonatomic_val(dest);
779        this.allow_data_races_mut(move |this| this.write_scalar(val, dest))?;
780        this.validate_atomic_store(dest, atomic)?;
781        this.buffered_atomic_write(val, dest, atomic, old_val)
782    }
783
784    /// Perform an atomic RMW operation on a memory location.
785    fn atomic_rmw_op_immediate(
786        &mut self,
787        place: &MPlaceTy<'tcx>,
788        rhs: &ImmTy<'tcx>,
789        atomic_op: AtomicRmwOp,
790        ord: AtomicRwOrd,
791    ) -> InterpResult<'tcx, ImmTy<'tcx>> {
792        let this = self.eval_context_mut();
793        this.atomic_access_check(place, AtomicAccessType::Rmw)?;
794
795        let old = this.allow_data_races_mut(|this| this.read_immediate(place))?;
796
797        // Inform GenMC about the atomic rmw operation.
798        if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
799            let (old_val, new_val) = genmc_ctx.atomic_rmw_op(
800                this,
801                place.ptr().addr(),
802                place.layout.size,
803                atomic_op,
804                place.layout.backend_repr.is_signed(),
805                ord,
806                rhs.to_scalar(),
807                old.to_scalar(),
808            )?;
809            if let Some(new_val) = new_val {
810                this.allow_data_races_mut(|this| this.write_scalar(new_val, place))?;
811            }
812            return interp_ok(ImmTy::from_scalar(old_val, old.layout));
813        }
814
815        let val = match atomic_op {
816            AtomicRmwOp::MirOp { op, neg } => {
817                let val = this.binary_op(op, &old, rhs)?;
818                if neg { this.unary_op(mir::UnOp::Not, &val)? } else { val }
819            }
820            AtomicRmwOp::Max => {
821                let lt = this.binary_op(mir::BinOp::Lt, &old, rhs)?.to_scalar().to_bool()?;
822                if lt { rhs } else { &old }.clone()
823            }
824            AtomicRmwOp::Min => {
825                let lt = this.binary_op(mir::BinOp::Lt, &old, rhs)?.to_scalar().to_bool()?;
826                if lt { &old } else { rhs }.clone()
827            }
828        };
829
830        this.allow_data_races_mut(|this| this.write_immediate(*val, place))?;
831
832        this.validate_atomic_rmw(place, ord)?;
833
834        this.buffered_atomic_rmw(val.to_scalar(), place, ord, old.to_scalar())?;
835        interp_ok(old)
836    }
837
838    /// Perform an atomic exchange with a memory place and a new
839    /// scalar value, the old value is returned.
840    fn atomic_exchange_scalar(
841        &mut self,
842        place: &MPlaceTy<'tcx>,
843        new: Scalar,
844        atomic: AtomicRwOrd,
845    ) -> InterpResult<'tcx, Scalar> {
846        let this = self.eval_context_mut();
847        this.atomic_access_check(place, AtomicAccessType::Rmw)?;
848
849        let old = this.allow_data_races_mut(|this| this.read_scalar(place))?;
850        this.allow_data_races_mut(|this| this.write_scalar(new, place))?;
851
852        // Inform GenMC about the atomic atomic exchange.
853        if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
854            let (old_val, new_val) = genmc_ctx.atomic_exchange(
855                this,
856                place.ptr().addr(),
857                place.layout.size,
858                new,
859                atomic,
860                old,
861            )?;
862            // The store might be the latest store in coherence order (determined by GenMC).
863            // If it is, we need to update the value in Miri's memory:
864            if let Some(new_val) = new_val {
865                this.allow_data_races_mut(|this| this.write_scalar(new_val, place))?;
866            }
867            return interp_ok(old_val);
868        }
869
870        this.validate_atomic_rmw(place, atomic)?;
871
872        this.buffered_atomic_rmw(new, place, atomic, old)?;
873        interp_ok(old)
874    }
875
876    /// Perform an atomic compare and exchange at a given memory location.
877    /// On success an atomic RMW operation is performed and on failure
878    /// only an atomic read occurs. If `can_fail_spuriously` is true,
879    /// then we treat it as a "compare_exchange_weak" operation, and
880    /// some portion of the time fail even when the values are actually
881    /// identical.
882    fn atomic_compare_exchange_scalar(
883        &mut self,
884        place: &MPlaceTy<'tcx>,
885        expect_old: &ImmTy<'tcx>,
886        new: Scalar,
887        success: AtomicRwOrd,
888        fail: AtomicReadOrd,
889        can_fail_spuriously: bool,
890    ) -> InterpResult<'tcx, Immediate<Provenance>> {
891        use rand::Rng as _;
892        let this = self.eval_context_mut();
893        this.atomic_access_check(place, AtomicAccessType::Rmw)?;
894
895        // Read as immediate for the sake of `binary_op()`
896        let old = this.allow_data_races_mut(|this| this.read_immediate(place))?;
897
898        // Inform GenMC about the atomic atomic compare exchange.
899        if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
900            let (old_value, new_value, cmpxchg_success) = genmc_ctx.atomic_compare_exchange(
901                this,
902                place.ptr().addr(),
903                place.layout.size,
904                this.read_scalar(expect_old)?,
905                new,
906                success,
907                fail,
908                can_fail_spuriously,
909                old.to_scalar(),
910            )?;
911            // The store might be the latest store in coherence order (determined by GenMC).
912            // If it is, we need to update the value in Miri's memory:
913            if let Some(new_value) = new_value {
914                this.allow_data_races_mut(|this| this.write_scalar(new_value, place))?;
915            }
916            return interp_ok(Immediate::ScalarPair(old_value, Scalar::from_bool(cmpxchg_success)));
917        }
918
919        // `binary_op` will bail if either of them is not a scalar.
920        let eq = this.binary_op(mir::BinOp::Eq, &old, expect_old)?;
921        // If the operation would succeed, but is "weak", fail some portion
922        // of the time, based on `success_rate`.
923        let success_rate = 1.0 - this.machine.cmpxchg_weak_failure_rate;
924        let cmpxchg_success = eq.to_scalar().to_bool()?
925            && if can_fail_spuriously {
926                this.machine.rng.get_mut().random_bool(success_rate)
927            } else {
928                true
929            };
930        let res = Immediate::ScalarPair(old.to_scalar(), Scalar::from_bool(cmpxchg_success));
931
932        // Update ptr depending on comparison.
933        // if successful, perform a full rw-atomic validation
934        // otherwise treat this as an atomic load with the fail ordering.
935        if cmpxchg_success {
936            this.allow_data_races_mut(|this| this.write_scalar(new, place))?;
937            this.validate_atomic_rmw(place, success)?;
938            this.buffered_atomic_rmw(new, place, success, old.to_scalar())?;
939        } else {
940            this.validate_atomic_load(place, fail, /* can use latest sync clock */ None)?;
941            // A failed compare exchange is equivalent to a load, reading from the latest store
942            // in the modification order.
943            // Since `old` is only a value and not the store element, we need to separately
944            // find it in our store buffer and perform load_impl on it.
945            this.perform_read_on_buffered_latest(place, fail)?;
946        }
947
948        // Return the old value.
949        interp_ok(res)
950    }
951
952    /// Update the data-race detector for an atomic fence on the current thread.
953    fn atomic_fence(&mut self, atomic: AtomicFenceOrd) -> InterpResult<'tcx> {
954        let this = self.eval_context_mut();
955        let machine = &this.machine;
956        match &this.machine.data_race {
957            GlobalDataRaceHandler::None => interp_ok(()),
958            GlobalDataRaceHandler::Vclocks(data_race) => data_race.atomic_fence(machine, atomic),
959            GlobalDataRaceHandler::Genmc(genmc_ctx) => genmc_ctx.atomic_fence(machine, atomic),
960        }
961    }
962
963    /// Calls the callback with the "release" clock of the current thread.
964    /// Other threads can acquire this clock in the future to establish synchronization
965    /// with this program point.
966    ///
967    /// The closure will only be invoked if data race handling is on.
968    fn release_clock<R>(
969        &self,
970        callback: impl FnOnce(&VClock) -> R,
971    ) -> InterpResult<'tcx, Option<R>> {
972        let this = self.eval_context_ref();
973        interp_ok(match &this.machine.data_race {
974            GlobalDataRaceHandler::None => None,
975            GlobalDataRaceHandler::Genmc(_genmc_ctx) =>
976                throw_unsup_format!(
977                    "this operation performs synchronization that is not supported in GenMC mode"
978                ),
979            GlobalDataRaceHandler::Vclocks(data_race) =>
980                Some(data_race.release_clock(&this.machine.threads, callback)),
981        })
982    }
983
984    /// Acquire the given clock into the current thread, establishing synchronization with
985    /// the moment when that clock snapshot was taken via `release_clock`.
986    fn acquire_clock(&self, clock: &VClock) -> InterpResult<'tcx> {
987        let this = self.eval_context_ref();
988        match &this.machine.data_race {
989            GlobalDataRaceHandler::None => {}
990            GlobalDataRaceHandler::Genmc(_genmc_ctx) =>
991                throw_unsup_format!(
992                    "this operation performs synchronization that is not supported in GenMC mode"
993                ),
994            GlobalDataRaceHandler::Vclocks(data_race) =>
995                data_race.acquire_clock(clock, &this.machine.threads),
996        }
997        interp_ok(())
998    }
999}
1000
1001/// Vector clock metadata for a logical memory allocation.
1002#[derive(Debug, Clone)]
1003pub struct VClockAlloc {
1004    /// Assigning each byte a MemoryCellClocks.
1005    alloc_ranges: RefCell<DedupRangeMap<MemoryCellClocks>>,
1006}
1007
1008impl VisitProvenance for VClockAlloc {
1009    fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {
1010        // No tags or allocIds here.
1011    }
1012}
1013
1014impl VClockAlloc {
1015    /// Create a new data-race detector for newly allocated memory.
1016    pub fn new_allocation(
1017        global: &GlobalState,
1018        thread_mgr: &ThreadManager<'_>,
1019        len: Size,
1020        kind: MemoryKind,
1021        current_span: Span,
1022    ) -> VClockAlloc {
1023        // Determine the thread that did the allocation, and when it did it.
1024        let (alloc_timestamp, alloc_index) = match kind {
1025            // User allocated and stack memory should track allocation.
1026            MemoryKind::Machine(
1027                MiriMemoryKind::Rust
1028                | MiriMemoryKind::Miri
1029                | MiriMemoryKind::C
1030                | MiriMemoryKind::WinHeap
1031                | MiriMemoryKind::WinLocal
1032                | MiriMemoryKind::Mmap,
1033            )
1034            | MemoryKind::Stack => {
1035                let (alloc_index, clocks) = global.active_thread_state(thread_mgr);
1036                let mut alloc_timestamp = clocks.clock[alloc_index];
1037                alloc_timestamp.span = current_span;
1038                (alloc_timestamp, alloc_index)
1039            }
1040            // Other global memory should trace races but be allocated at the 0 timestamp
1041            // (conceptually they are allocated on the main thread before everything).
1042            MemoryKind::Machine(
1043                MiriMemoryKind::Global
1044                | MiriMemoryKind::Machine
1045                | MiriMemoryKind::Runtime
1046                | MiriMemoryKind::ExternStatic
1047                | MiriMemoryKind::Tls,
1048            )
1049            | MemoryKind::CallerLocation =>
1050                (VTimestamp::ZERO, global.thread_index(ThreadId::MAIN_THREAD)),
1051        };
1052        VClockAlloc {
1053            alloc_ranges: RefCell::new(DedupRangeMap::new(
1054                len,
1055                MemoryCellClocks::new(alloc_timestamp, alloc_index),
1056            )),
1057        }
1058    }
1059
1060    // Find an index, if one exists where the value
1061    // in `l` is greater than the value in `r`.
1062    fn find_gt_index(l: &VClock, r: &VClock) -> Option<VectorIdx> {
1063        trace!("Find index where not {:?} <= {:?}", l, r);
1064        let l_slice = l.as_slice();
1065        let r_slice = r.as_slice();
1066        l_slice
1067            .iter()
1068            .zip(r_slice.iter())
1069            .enumerate()
1070            .find_map(|(idx, (&l, &r))| if l > r { Some(idx) } else { None })
1071            .or_else(|| {
1072                if l_slice.len() > r_slice.len() {
1073                    // By invariant, if l_slice is longer
1074                    // then one element must be larger.
1075                    // This just validates that this is true
1076                    // and reports earlier elements first.
1077                    let l_remainder_slice = &l_slice[r_slice.len()..];
1078                    let idx = l_remainder_slice
1079                        .iter()
1080                        .enumerate()
1081                        .find_map(|(idx, &r)| if r == VTimestamp::ZERO { None } else { Some(idx) })
1082                        .expect("Invalid VClock Invariant");
1083                    Some(idx + r_slice.len())
1084                } else {
1085                    None
1086                }
1087            })
1088            .map(VectorIdx::new)
1089    }
1090
1091    /// Report a data-race found in the program.
1092    /// This finds the two racing threads and the type
1093    /// of data-race that occurred. This will also
1094    /// return info about the memory location the data-race
1095    /// occurred in. The `ty` parameter is used for diagnostics, letting
1096    /// the user know which type was involved in the access.
1097    #[cold]
1098    #[inline(never)]
1099    fn report_data_race<'tcx>(
1100        global: &GlobalState,
1101        thread_mgr: &ThreadManager<'_>,
1102        mem_clocks: &MemoryCellClocks,
1103        access: AccessType,
1104        access_size: Size,
1105        ptr_dbg: interpret::Pointer<AllocId>,
1106        ty: Option<Ty<'_>>,
1107    ) -> InterpResult<'tcx> {
1108        let (active_index, active_clocks) = global.active_thread_state(thread_mgr);
1109        let mut other_size = None; // if `Some`, this was a size-mismatch race
1110        let write_clock;
1111        let (other_access, other_thread, other_clock) =
1112            // First check the atomic-nonatomic cases.
1113            if !access.is_atomic() &&
1114                let Some(atomic) = mem_clocks.atomic() &&
1115                let Some(idx) = Self::find_gt_index(&atomic.write_vector, &active_clocks.clock)
1116            {
1117                (AccessType::AtomicStore, idx, &atomic.write_vector)
1118            } else if !access.is_atomic() &&
1119                let Some(atomic) = mem_clocks.atomic() &&
1120                let Some(idx) = Self::find_gt_index(&atomic.read_vector, &active_clocks.clock)
1121            {
1122                (AccessType::AtomicLoad, idx, &atomic.read_vector)
1123            // Then check races with non-atomic writes/reads.
1124            } else if mem_clocks.write.1 > active_clocks.clock[mem_clocks.write.0] {
1125                write_clock = mem_clocks.write();
1126                (AccessType::NaWrite(mem_clocks.write_type), mem_clocks.write.0, &write_clock)
1127            } else if let Some(idx) = Self::find_gt_index(&mem_clocks.read, &active_clocks.clock) {
1128                (AccessType::NaRead(mem_clocks.read[idx].read_type()), idx, &mem_clocks.read)
1129            // Finally, mixed-size races.
1130            } else if access.is_atomic() && let Some(atomic) = mem_clocks.atomic() && atomic.size != Some(access_size) {
1131                // This is only a race if we are not synchronized with all atomic accesses, so find
1132                // the one we are not synchronized with.
1133                other_size = Some(atomic.size.unwrap_or(Size::ZERO));
1134                if let Some(idx) = Self::find_gt_index(&atomic.write_vector, &active_clocks.clock)
1135                    {
1136                        (AccessType::AtomicStore, idx, &atomic.write_vector)
1137                    } else if let Some(idx) =
1138                        Self::find_gt_index(&atomic.read_vector, &active_clocks.clock)
1139                    {
1140                        (AccessType::AtomicLoad, idx, &atomic.read_vector)
1141                    } else {
1142                        unreachable!(
1143                            "Failed to report data-race for mixed-size access: no race found"
1144                        )
1145                    }
1146            } else {
1147                unreachable!("Failed to report data-race")
1148            };
1149
1150        // Load elaborated thread information about the racing thread actions.
1151        let active_thread_info = global.print_thread_metadata(thread_mgr, active_index);
1152        let other_thread_info = global.print_thread_metadata(thread_mgr, other_thread);
1153        let involves_non_atomic = !access.is_atomic() || !other_access.is_atomic();
1154
1155        // Throw the data-race detection.
1156        let extra = if other_size.is_some() {
1157            assert!(!involves_non_atomic);
1158            Some("overlapping unsynchronized atomic accesses must use the same access size")
1159        } else if access.is_read() && other_access.is_read() {
1160            panic!("there should be no same-size read-read races")
1161        } else {
1162            None
1163        };
1164        Err(err_machine_stop!(TerminationInfo::DataRace {
1165            involves_non_atomic,
1166            extra,
1167            retag_explain: access.is_retag() || other_access.is_retag(),
1168            ptr: ptr_dbg,
1169            op1: RacingOp {
1170                action: other_access.description(None, other_size),
1171                thread_info: other_thread_info,
1172                span: other_clock.as_slice()[other_thread.index()].span_data(),
1173            },
1174            op2: RacingOp {
1175                action: access.description(ty, other_size.map(|_| access_size)),
1176                thread_info: active_thread_info,
1177                span: active_clocks.clock.as_slice()[active_index.index()].span_data(),
1178            },
1179        }))?
1180    }
1181
1182    /// Return the release/acquire synchronization clock for the given memory range.
1183    pub(super) fn sync_clock(&self, access_range: AllocRange) -> VClock {
1184        let alloc_ranges = self.alloc_ranges.borrow();
1185        let mut clock = VClock::default();
1186        for (_, mem_clocks) in alloc_ranges.iter(access_range.start, access_range.size) {
1187            if let Some(atomic) = mem_clocks.atomic() {
1188                clock.join(&atomic.sync_vector);
1189            }
1190        }
1191        clock
1192    }
1193
1194    /// Detect data-races for an unsynchronized read operation. It will not perform
1195    /// data-race detection if `race_detecting()` is false, either due to no threads
1196    /// being created or if it is temporarily disabled during a racy read or write
1197    /// operation for which data-race detection is handled separately, for example
1198    /// atomic read operations. The `ty` parameter is used for diagnostics, letting
1199    /// the user know which type was read.
1200    pub fn read_non_atomic<'tcx>(
1201        &self,
1202        alloc_id: AllocId,
1203        access_range: AllocRange,
1204        read_type: NaReadType,
1205        ty: Option<Ty<'_>>,
1206        machine: &MiriMachine<'_>,
1207    ) -> InterpResult<'tcx> {
1208        let current_span = machine.current_user_relevant_span();
1209        let global = machine.data_race.as_vclocks_ref().unwrap();
1210        if !global.race_detecting() {
1211            return interp_ok(());
1212        }
1213        let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
1214        let mut alloc_ranges = self.alloc_ranges.borrow_mut();
1215        for (mem_clocks_range, mem_clocks) in
1216            alloc_ranges.iter_mut(access_range.start, access_range.size)
1217        {
1218            if let Err(DataRace) =
1219                mem_clocks.read_race_detect(&mut thread_clocks, index, read_type, current_span)
1220            {
1221                drop(thread_clocks);
1222                // Report data-race.
1223                return Self::report_data_race(
1224                    global,
1225                    &machine.threads,
1226                    mem_clocks,
1227                    AccessType::NaRead(read_type),
1228                    access_range.size,
1229                    interpret::Pointer::new(alloc_id, Size::from_bytes(mem_clocks_range.start)),
1230                    ty,
1231                );
1232            }
1233        }
1234        interp_ok(())
1235    }
1236
1237    /// Detect data-races for an unsynchronized write operation. It will not perform
1238    /// data-race detection if `race_detecting()` is false, either due to no threads
1239    /// being created or if it is temporarily disabled during a racy read or write
1240    /// operation. The `ty` parameter is used for diagnostics, letting
1241    /// the user know which type was written.
1242    pub fn write_non_atomic<'tcx>(
1243        &mut self,
1244        alloc_id: AllocId,
1245        access_range: AllocRange,
1246        write_type: NaWriteType,
1247        ty: Option<Ty<'_>>,
1248        machine: &mut MiriMachine<'_>,
1249    ) -> InterpResult<'tcx> {
1250        let current_span = machine.current_user_relevant_span();
1251        let global = machine.data_race.as_vclocks_mut().unwrap();
1252        if !global.race_detecting() {
1253            return interp_ok(());
1254        }
1255        let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
1256        for (mem_clocks_range, mem_clocks) in
1257            self.alloc_ranges.get_mut().iter_mut(access_range.start, access_range.size)
1258        {
1259            if let Err(DataRace) =
1260                mem_clocks.write_race_detect(&mut thread_clocks, index, write_type, current_span)
1261            {
1262                drop(thread_clocks);
1263                // Report data-race
1264                return Self::report_data_race(
1265                    global,
1266                    &machine.threads,
1267                    mem_clocks,
1268                    AccessType::NaWrite(write_type),
1269                    access_range.size,
1270                    interpret::Pointer::new(alloc_id, Size::from_bytes(mem_clocks_range.start)),
1271                    ty,
1272                );
1273            }
1274        }
1275        interp_ok(())
1276    }
1277}
1278
1279/// Vector clock state for a stack frame (tracking the local variables
1280/// that do not have an allocation yet).
1281#[derive(Debug, Default)]
1282pub struct FrameState {
1283    local_clocks: RefCell<FxHashMap<mir::Local, LocalClocks>>,
1284}
1285
1286/// Stripped-down version of [`MemoryCellClocks`] for the clocks we need to keep track
1287/// of in a local that does not yet have addressable memory -- and hence can only
1288/// be accessed from the thread its stack frame belongs to, and cannot be access atomically.
1289#[derive(Debug)]
1290struct LocalClocks {
1291    write: VTimestamp,
1292    write_type: NaWriteType,
1293    read: VTimestamp,
1294}
1295
1296impl Default for LocalClocks {
1297    fn default() -> Self {
1298        Self { write: VTimestamp::ZERO, write_type: NaWriteType::Allocate, read: VTimestamp::ZERO }
1299    }
1300}
1301
1302impl FrameState {
1303    pub fn local_write(&self, local: mir::Local, storage_live: bool, machine: &MiriMachine<'_>) {
1304        let current_span = machine.current_user_relevant_span();
1305        let global = machine.data_race.as_vclocks_ref().unwrap();
1306        if !global.race_detecting() {
1307            return;
1308        }
1309        let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
1310        // This should do the same things as `MemoryCellClocks::write_race_detect`.
1311        if !current_span.is_dummy() {
1312            thread_clocks.clock.index_mut(index).span = current_span;
1313        }
1314        let mut clocks = self.local_clocks.borrow_mut();
1315        if storage_live {
1316            let new_clocks = LocalClocks {
1317                write: thread_clocks.clock[index],
1318                write_type: NaWriteType::Allocate,
1319                read: VTimestamp::ZERO,
1320            };
1321            // There might already be an entry in the map for this, if the local
1322            // was previously live.
1323            clocks.insert(local, new_clocks);
1324        } else {
1325            // This can fail to exist if `race_detecting` was false when the allocation
1326            // occurred, in which case we can backdate this to the beginning of time.
1327            let clocks = clocks.entry(local).or_default();
1328            clocks.write = thread_clocks.clock[index];
1329            clocks.write_type = NaWriteType::Write;
1330        }
1331    }
1332
1333    pub fn local_read(&self, local: mir::Local, machine: &MiriMachine<'_>) {
1334        let current_span = machine.current_user_relevant_span();
1335        let global = machine.data_race.as_vclocks_ref().unwrap();
1336        if !global.race_detecting() {
1337            return;
1338        }
1339        let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
1340        // This should do the same things as `MemoryCellClocks::read_race_detect`.
1341        if !current_span.is_dummy() {
1342            thread_clocks.clock.index_mut(index).span = current_span;
1343        }
1344        thread_clocks.clock.index_mut(index).set_read_type(NaReadType::Read);
1345        // This can fail to exist if `race_detecting` was false when the allocation
1346        // occurred, in which case we can backdate this to the beginning of time.
1347        let mut clocks = self.local_clocks.borrow_mut();
1348        let clocks = clocks.entry(local).or_default();
1349        clocks.read = thread_clocks.clock[index];
1350    }
1351
1352    pub fn local_moved_to_memory(
1353        &self,
1354        local: mir::Local,
1355        alloc: &mut VClockAlloc,
1356        machine: &MiriMachine<'_>,
1357    ) {
1358        let global = machine.data_race.as_vclocks_ref().unwrap();
1359        if !global.race_detecting() {
1360            return;
1361        }
1362        let (index, _thread_clocks) = global.active_thread_state_mut(&machine.threads);
1363        // Get the time the last write actually happened. This can fail to exist if
1364        // `race_detecting` was false when the write occurred, in which case we can backdate this
1365        // to the beginning of time.
1366        let local_clocks = self.local_clocks.borrow_mut().remove(&local).unwrap_or_default();
1367        for (_mem_clocks_range, mem_clocks) in alloc.alloc_ranges.get_mut().iter_mut_all() {
1368            // The initialization write for this already happened, just at the wrong timestamp.
1369            // Check that the thread index matches what we expect.
1370            assert_eq!(mem_clocks.write.0, index);
1371            // Convert the local's clocks into memory clocks.
1372            mem_clocks.write = (index, local_clocks.write);
1373            mem_clocks.write_type = local_clocks.write_type;
1374            mem_clocks.read = VClock::new_with_index(index, local_clocks.read);
1375        }
1376    }
1377}
1378
1379impl<'tcx> EvalContextPrivExt<'tcx> for MiriInterpCx<'tcx> {}
1380trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> {
1381    /// Temporarily allow data-races to occur. This should only be used in
1382    /// one of these cases:
1383    /// - One of the appropriate `validate_atomic` functions will be called to
1384    ///   treat a memory access as atomic.
1385    /// - The memory being accessed should be treated as internal state that
1386    ///   cannot be accessed by the interpreted program.
1387    /// - Execution of the interpreted program has halted.
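    ///
    /// A minimal usage sketch (illustrative; `internal_place` is a hypothetical `MPlaceTy`
    /// holding machine-internal state):
    ///
    /// ```ignore
    /// let val = this.allow_data_races_ref(|this| this.read_scalar(&internal_place))?;
    /// ```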
1388    #[inline]
1389    fn allow_data_races_ref<R>(&self, op: impl FnOnce(&MiriInterpCx<'tcx>) -> R) -> R {
1390        let this = self.eval_context_ref();
1391        this.machine.data_race.set_ongoing_action_data_race_free(true);
1392        let result = op(this);
1393        this.machine.data_race.set_ongoing_action_data_race_free(false);
1394        result
1395    }
1396
1397    /// Same as `allow_data_races_ref`, this temporarily disables any data-race detection and
1398    /// so should only be used for atomic operations or internal state that the program cannot
1399    /// access.
1400    #[inline]
1401    fn allow_data_races_mut<R>(&mut self, op: impl FnOnce(&mut MiriInterpCx<'tcx>) -> R) -> R {
1402        let this = self.eval_context_mut();
1403        this.machine.data_race.set_ongoing_action_data_race_free(true);
1404        let result = op(this);
1405        this.machine.data_race.set_ongoing_action_data_race_free(false);
1406        result
1407    }
1408
1409    /// Checks that an atomic access is legal at the given place.
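    ///
    /// For illustration, a sketch of an interpreted program this check rejects: an atomic store
    /// through a pointer into read-only memory (here a non-`mut` `static`) is reported as UB
    /// below rather than silently accepted.
    ///
    /// ```ignore
    /// static X: i32 = 0; // lives in read-only memory
    /// let a = unsafe { std::sync::atomic::AtomicI32::from_ptr(&X as *const i32 as *mut i32) };
    /// a.store(1, std::sync::atomic::Ordering::Relaxed); // rejected: store to read-only memory
    /// ```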
1410    fn atomic_access_check(
1411        &self,
1412        place: &MPlaceTy<'tcx>,
1413        access_type: AtomicAccessType,
1414    ) -> InterpResult<'tcx> {
1415        let this = self.eval_context_ref();
1416        // Check alignment requirements. Atomics must always be aligned to their size,
1417        // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
1418        // be 8-aligned).
1419        let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
1420        this.check_ptr_align(place.ptr(), align)?;
1421        // Ensure the allocation is mutable. Even failing (read-only) compare_exchange operations need mutable
1422        // memory on many targets (i.e., they segfault if that memory is mapped read-only), and
1423        // atomic loads can be implemented via compare_exchange on some targets. There could
1424        // possibly be some very specific exceptions to this, see
1425        // <https://github.com/rust-lang/miri/pull/2464#discussion_r939636130> for details.
1426        // We avoid `get_ptr_alloc` since we do *not* want to run the access hooks -- the actual
1427        // access will happen later.
1428        let (alloc_id, _offset, _prov) = this
1429            .ptr_try_get_alloc_id(place.ptr(), 0)
1430            .expect("there are no zero-sized atomic accesses");
1431        if this.get_alloc_mutability(alloc_id)? == Mutability::Not {
1432            // See if this is fine.
1433            match access_type {
1434                AtomicAccessType::Rmw | AtomicAccessType::Store => {
1435                    throw_ub_format!(
1436                        "atomic store and read-modify-write operations cannot be performed on read-only memory\n\
1437                        see <https://doc.rust-lang.org/nightly/std/sync/atomic/index.html#atomic-accesses-to-read-only-memory> for more information"
1438                    );
1439                }
1440                AtomicAccessType::Load(_)
1441                    if place.layout.size > this.tcx.data_layout().pointer_size() =>
1442                {
1443                    throw_ub_format!(
1444                        "large atomic load operations cannot be performed on read-only memory\n\
1445                        these operations often have to be implemented using read-modify-write operations, which require writeable memory\n\
1446                        see <https://doc.rust-lang.org/nightly/std/sync/atomic/index.html#atomic-accesses-to-read-only-memory> for more information"
1447                    );
1448                }
1449                AtomicAccessType::Load(o) if o != AtomicReadOrd::Relaxed => {
1450                    throw_ub_format!(
1451                        "non-relaxed atomic load operations cannot be performed on read-only memory\n\
1452                        these operations sometimes have to be implemented using read-modify-write operations, which require writeable memory\n\
1453                        see <https://doc.rust-lang.org/nightly/std/sync/atomic/index.html#atomic-accesses-to-read-only-memory> for more information"
1454                    );
1455                }
1456                _ => {
1457                    // Large relaxed loads are fine!
1458                }
1459            }
1460        }
1461        interp_ok(())
1462    }
1463
1464    /// Update the data-race detector for an atomic read occurring at the
1465    /// associated memory-place and on the current thread.
1466    fn validate_atomic_load(
1467        &self,
1468        place: &MPlaceTy<'tcx>,
1469        atomic: AtomicReadOrd,
1470        sync_clock: Option<&VClock>,
1471    ) -> InterpResult<'tcx> {
1472        let this = self.eval_context_ref();
1473        this.validate_atomic_op(
1474            place,
1475            atomic,
1476            AccessType::AtomicLoad,
1477            move |memory, clocks, index, atomic| {
1478                if atomic == AtomicReadOrd::Relaxed {
1479                    memory.load_relaxed(&mut *clocks, index, place.layout.size, sync_clock)
1480                } else {
1481                    memory.load_acquire(&mut *clocks, index, place.layout.size, sync_clock)
1482                }
1483            },
1484        )
1485    }
1486
1487    /// Update the data-race detector for an atomic write occurring at the
1488    /// associated memory-place and on the current thread.
1489    fn validate_atomic_store(
1490        &mut self,
1491        place: &MPlaceTy<'tcx>,
1492        atomic: AtomicWriteOrd,
1493    ) -> InterpResult<'tcx> {
1494        let this = self.eval_context_mut();
1495        this.validate_atomic_op(
1496            place,
1497            atomic,
1498            AccessType::AtomicStore,
1499            move |memory, clocks, index, atomic| {
1500                if atomic == AtomicWriteOrd::Relaxed {
1501                    memory.store_relaxed(clocks, index, place.layout.size)
1502                } else {
1503                    memory.store_release(clocks, index, place.layout.size)
1504                }
1505            },
1506        )
1507    }
1508
1509    /// Update the data-race detector for an atomic read-modify-write occurring
1510    /// at the associated memory place and on the current thread.
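    ///
    /// As a quick reference, derived from the `acquire`/`release` split on [`AtomicRwOrd`] below:
    ///
    /// ```ignore
    /// // AcqRel / SeqCst RMW  =>  load_acquire + rmw_release
    /// // Acquire RMW          =>  load_acquire + rmw_relaxed
    /// // Release RMW          =>  load_relaxed + rmw_release
    /// // Relaxed RMW          =>  load_relaxed + rmw_relaxed
    /// ```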
1511    fn validate_atomic_rmw(
1512        &mut self,
1513        place: &MPlaceTy<'tcx>,
1514        atomic: AtomicRwOrd,
1515    ) -> InterpResult<'tcx> {
1516        use AtomicRwOrd::*;
1517        let acquire = matches!(atomic, Acquire | AcqRel | SeqCst);
1518        let release = matches!(atomic, Release | AcqRel | SeqCst);
1519        let this = self.eval_context_mut();
1520        this.validate_atomic_op(
1521            place,
1522            atomic,
1523            AccessType::AtomicRmw,
1524            move |memory, clocks, index, _| {
1525                if acquire {
1526                    memory.load_acquire(clocks, index, place.layout.size, None)?;
1527                } else {
1528                    memory.load_relaxed(clocks, index, place.layout.size, None)?;
1529                }
1530                if release {
1531                    memory.rmw_release(clocks, index, place.layout.size)
1532                } else {
1533                    memory.rmw_relaxed(clocks, index, place.layout.size)
1534                }
1535            },
1536        )
1537    }
1538
1539    /// Returns the most recent *non-atomic* value stored in the given place.
1540    /// Errors if we don't need that (because we don't do store buffering) or if
1541    /// the most recent value is in fact atomic.
1542    fn get_latest_nonatomic_val(&self, place: &MPlaceTy<'tcx>) -> Result<Option<Scalar>, ()> {
1543        let this = self.eval_context_ref();
1544        // These cannot fail because `atomic_access_check` was done first.
1545        let (alloc_id, offset, _prov) = this.ptr_get_alloc_id(place.ptr(), 0).unwrap();
1546        let alloc_meta = &this.get_alloc_extra(alloc_id).unwrap().data_race;
1547        if alloc_meta.as_weak_memory_ref().is_none() {
1548            // No reason to read old value if we don't track store buffers.
1549            return Err(());
1550        }
1551        let data_race = alloc_meta.as_vclocks_ref().unwrap();
1552        // Only read old value if this is currently a non-atomic location.
1553        for (_range, clocks) in data_race.alloc_ranges.borrow_mut().iter(offset, place.layout.size)
1554        {
1555            // If this had an atomic write that's not before the non-atomic write, that should
1556            // already be in the store buffer. Initializing the store buffer now would use the
1557            // wrong `sync_clock` so we better make sure that does not happen.
1558            if clocks.atomic().is_some_and(|atomic| !(atomic.write_vector <= clocks.write())) {
1559                return Err(());
1560            }
1561        }
1562        // The program didn't actually do a read, so suppress the memory access hooks.
1563        // This is also a very special exception where we just ignore an error -- if this read
1564        // was UB e.g. because the memory is uninitialized, we don't want to know!
1565        Ok(this.run_for_validation_ref(|this| this.read_scalar(place)).discard_err())
1566    }
1567
1568    /// Generic atomic operation implementation
1569    fn validate_atomic_op<A: Debug + Copy>(
1570        &self,
1571        place: &MPlaceTy<'tcx>,
1572        atomic: A,
1573        access: AccessType,
1574        mut op: impl FnMut(
1575            &mut MemoryCellClocks,
1576            &mut ThreadClockSet,
1577            VectorIdx,
1578            A,
1579        ) -> Result<(), DataRace>,
1580    ) -> InterpResult<'tcx> {
1581        let this = self.eval_context_ref();
1582        assert!(access.is_atomic());
1583        let Some(data_race) = this.machine.data_race.as_vclocks_ref() else {
1584            return interp_ok(());
1585        };
1586        if !data_race.race_detecting() {
1587            return interp_ok(());
1588        }
1589        let size = place.layout.size;
1590        let (alloc_id, base_offset, _prov) = this.ptr_get_alloc_id(place.ptr(), 0)?;
1591        // Load and log the atomic operation.
1592        // Note that atomic loads are possible even from read-only allocations, so `get_alloc_extra_mut` is not an option.
1593        let alloc_meta = this.get_alloc_extra(alloc_id)?.data_race.as_vclocks_ref().unwrap();
1594        trace!(
1595            "Atomic op({}) with ordering {:?} on {:?} (size={})",
1596            access.description(None, None),
1597            &atomic,
1598            place.ptr(),
1599            size.bytes()
1600        );
1601
1602        let current_span = this.machine.current_user_relevant_span();
1603        // Perform the atomic operation.
1604        data_race.maybe_perform_sync_operation(
1605            &this.machine.threads,
1606            current_span,
1607            |index, mut thread_clocks| {
1608                for (mem_clocks_range, mem_clocks) in
1609                    alloc_meta.alloc_ranges.borrow_mut().iter_mut(base_offset, size)
1610                {
1611                    if let Err(DataRace) = op(mem_clocks, &mut thread_clocks, index, atomic) {
1612                        mem::drop(thread_clocks);
1613                        return VClockAlloc::report_data_race(
1614                            data_race,
1615                            &this.machine.threads,
1616                            mem_clocks,
1617                            access,
1618                            place.layout.size,
1619                            interpret::Pointer::new(
1620                                alloc_id,
1621                                Size::from_bytes(mem_clocks_range.start),
1622                            ),
1623                            None,
1624                        )
1625                        .map(|_| true);
1626                    }
1627                }
1628
1629                // This conservatively assumes all operations have release semantics
1630                interp_ok(true)
1631            },
1632        )?;
1633
1634        // Log changes to atomic memory.
1635        if tracing::enabled!(tracing::Level::TRACE) {
1636            for (_offset, mem_clocks) in alloc_meta.alloc_ranges.borrow().iter(base_offset, size) {
1637                trace!(
1638                    "Updated atomic memory({:?}, size={}) to {:#?}",
1639                    place.ptr(),
1640                    size.bytes(),
1641                    mem_clocks.atomic_ops
1642                );
1643            }
1644        }
1645
1646        interp_ok(())
1647    }
1648}
1649
1650impl GlobalState {
1651    /// Create a new global state, set up with just thread-id=0
1652    /// advanced to timestamp = 1.
1653    pub fn new(config: &MiriConfig) -> Self {
1654        let mut global_state = GlobalState {
1655            multi_threaded: Cell::new(false),
1656            ongoing_action_data_race_free: Cell::new(false),
1657            vector_clocks: RefCell::new(IndexVec::new()),
1658            vector_info: RefCell::new(IndexVec::new()),
1659            thread_info: RefCell::new(IndexVec::new()),
1660            reuse_candidates: RefCell::new(FxHashSet::default()),
1661            last_sc_fence: RefCell::new(VClock::default()),
1662            last_sc_write_per_thread: RefCell::new(VClock::default()),
1663            track_outdated_loads: config.track_outdated_loads,
1664            weak_memory: config.weak_memory_emulation,
1665        };
1666
1667        // Setup the main-thread since it is not explicitly created:
1668        // uses vector index and thread-id 0.
1669        let index = global_state.vector_clocks.get_mut().push(ThreadClockSet::default());
1670        global_state.vector_info.get_mut().push(ThreadId::MAIN_THREAD);
1671        global_state
1672            .thread_info
1673            .get_mut()
1674            .push(ThreadExtraState { vector_index: Some(index), termination_vector_clock: None });
1675
1676        global_state
1677    }
1678
1679    // We perform data-race detection when there is more than one active thread
1680    // and we have not temporarily disabled race detection to perform something
1681    // data-race free.
1682    fn race_detecting(&self) -> bool {
1683        self.multi_threaded.get() && !self.ongoing_action_data_race_free.get()
1684    }
1685
1686    pub fn ongoing_action_data_race_free(&self) -> bool {
1687        self.ongoing_action_data_race_free.get()
1688    }
1689
1690    // Try to find vector index values that can potentially be re-used
1691    // by a new thread instead of a new vector index being created.
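    // Illustrative example (assumed values): suppose the thread behind vector index 2 terminated
    // with `clock[2] == 7`. Once every other vector clock has a value of at least 7 at index 2
    // (i.e. has synchronized with that termination), index 2 can never again be the source of a
    // new race report, so it may be handed to the next spawned thread.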
1692    fn find_vector_index_reuse_candidate(&self) -> Option<VectorIdx> {
1693        let mut reuse = self.reuse_candidates.borrow_mut();
1694        let vector_clocks = self.vector_clocks.borrow();
1695        for &candidate in reuse.iter() {
1696            let target_timestamp = vector_clocks[candidate].clock[candidate];
1697            if vector_clocks.iter_enumerated().all(|(clock_idx, clock)| {
1698                // This clock has already synchronized with the candidate thread's final
1699                // timestamp, and hence cannot report a data-race with the candidate index.
1700                let no_data_race = clock.clock[candidate] >= target_timestamp;
1701
1702                // The vector represents a thread that has terminated and hence cannot
1703                // report a data-race with the candidate index.
1704                let vector_terminated = reuse.contains(&clock_idx);
1705
1706                // The vector index cannot report a race with the candidate index
1707                // and hence allows the candidate index to be re-used.
1708                no_data_race || vector_terminated
1709            }) {
1710                // Every vector index has either synchronized with the candidate's
1711                // final timestamp or itself belongs to a terminated thread, therefore
1712                // this vector clock index cannot report any more data-races and
1713                // is safe to re-use.
1714                assert!(reuse.remove(&candidate));
1715                return Some(candidate);
1716            }
1717        }
1718        None
1719    }
1720
1721    // Hook for thread creation: enables multi-threaded execution and marks
1722    // the current thread's timestamp as happening-before the newly created thread.
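    // Illustrative example with assumed clock values: if the spawning thread (vector index 0) has
    // clock [3, 0] and the created thread (index 1) starts from [0, 0], then `join_with` gives the
    // created thread [3, 0], and the trailing increments leave the spawner at [4, 0] and the new
    // thread at [3, 1], so everything the spawner did before the spawn happens-before the new
    // thread's first action.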
1723    #[inline]
1724    pub fn thread_created(
1725        &mut self,
1726        thread_mgr: &ThreadManager<'_>,
1727        thread: ThreadId,
1728        current_span: Span,
1729    ) {
1730        let current_index = self.active_thread_index(thread_mgr);
1731
1732        // Enable multi-threaded execution: there are now at least two threads,
1733        // so data-races are now possible.
1734        self.multi_threaded.set(true);
1735
1736        // Load and setup the associated thread metadata
1737        let mut thread_info = self.thread_info.borrow_mut();
1738        thread_info.ensure_contains_elem(thread, Default::default);
1739
1740        // Assign a vector index for the thread, attempting to re-use an old
1741        // vector index that can no longer report any data-races if possible.
1742        let created_index = if let Some(reuse_index) = self.find_vector_index_reuse_candidate() {
1743            // Now re-configure the re-use candidate, increment the clock
1744            // for the new sync use of the vector.
1745            let vector_clocks = self.vector_clocks.get_mut();
1746            vector_clocks[reuse_index].increment_clock(reuse_index, current_span);
1747
1748            // Locate the old thread the vector was associated with and update
1749            // it to represent the new thread instead.
1750            let vector_info = self.vector_info.get_mut();
1751            let old_thread = vector_info[reuse_index];
1752            vector_info[reuse_index] = thread;
1753
1754            // Mark the thread the vector index was associated with as no longer
1755            // representing a thread index.
1756            thread_info[old_thread].vector_index = None;
1757
1758            reuse_index
1759        } else {
1760            // No vector re-use candidates available, instead create
1761            // a new vector index.
1762            let vector_info = self.vector_info.get_mut();
1763            vector_info.push(thread)
1764        };
1765
1766        trace!("Creating thread = {:?} with vector index = {:?}", thread, created_index);
1767
1768        // Mark the chosen vector index as in use by the thread.
1769        thread_info[thread].vector_index = Some(created_index);
1770
1771        // Create a thread clock set if applicable.
1772        let vector_clocks = self.vector_clocks.get_mut();
1773        if created_index == vector_clocks.next_index() {
1774            vector_clocks.push(ThreadClockSet::default());
1775        }
1776
1777        // Now load the two clocks and configure the initial state.
1778        let (current, created) = vector_clocks.pick2_mut(current_index, created_index);
1779
1780        // Join the created with current, since the current thread's
1781        // previous actions happen-before the created thread.
1782        created.join_with(current);
1783
1784        // Advance both threads after the synchronized operation.
1785        // Both operations are considered to have release semantics.
1786        current.increment_clock(current_index, current_span);
1787        created.increment_clock(created_index, current_span);
1788    }
1789
1790    /// Hook on a thread join to update the implicit happens-before relation between the joined
1791    /// thread (the joinee, the thread that someone waited on) and the current thread (the joiner,
1792    /// the thread who was waiting).
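    ///
    /// For illustration with assumed clock values: if the joinee terminated with clock [5, 2] and
    /// the joiner currently has [1, 7], acquiring the termination clock leaves the joiner at the
    /// pointwise maximum [5, 7], so every access the joinee performed happens-before whatever the
    /// joiner does next.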
1793    #[inline]
1794    pub fn thread_joined(&mut self, threads: &ThreadManager<'_>, joinee: ThreadId) {
1795        let thread_info = self.thread_info.borrow();
1796        let thread_info = &thread_info[joinee];
1797
1798        // Load the associated vector clock for the terminated thread.
1799        let join_clock = thread_info
1800            .termination_vector_clock
1801            .as_ref()
1802            .expect("joined with thread but thread has not terminated");
1803        // Acquire that into the current thread.
1804        self.acquire_clock(join_clock, threads);
1805
1806        // Check the number of live threads; if the value is 1,
1807        // then test for potentially disabling multi-threaded execution.
1808        // This has to happen after `acquire_clock`, otherwise there'll always
1809        // be some thread that has not synchronized yet.
1810        if let Some(current_index) = thread_info.vector_index {
1811            if threads.get_live_thread_count() == 1 {
1812                let vector_clocks = self.vector_clocks.get_mut();
1813                // May potentially be able to disable multi-threaded execution.
1814                let current_clock = &vector_clocks[current_index];
1815                if vector_clocks
1816                    .iter_enumerated()
1817                    .all(|(idx, clocks)| clocks.clock[idx] <= current_clock.clock[idx])
1818                {
1819                    // All thread terminations happen-before the current clock
1820                    // therefore no data-races can be reported until a new thread
1821                    // is created, so disable multi-threaded execution.
1822                    self.multi_threaded.set(false);
1823                }
1824            }
1825        }
1826    }
1827
1828    /// On thread termination, the vector clock may be re-used
1829    /// in the future once all remaining thread-clocks catch
1830    /// up with the time index of the terminated thread.
1831    /// This records the thread's termination vector clock,
1832    /// which will be used when the thread is joined.
1833    /// This should be called strictly before any calls to
1834    /// `thread_joined`.
1835    #[inline]
1836    pub fn thread_terminated(&mut self, thread_mgr: &ThreadManager<'_>) {
1837        let current_thread = thread_mgr.active_thread();
1838        let current_index = self.active_thread_index(thread_mgr);
1839
1840        // Store the termination clock.
1841        let termination_clock = self.release_clock(thread_mgr, |clock| clock.clone());
1842        self.thread_info.get_mut()[current_thread].termination_vector_clock =
1843            Some(termination_clock);
1844
1845        // Add this thread's clock index as a candidate for re-use.
1846        let reuse = self.reuse_candidates.get_mut();
1847        reuse.insert(current_index);
1848    }
1849
1850    /// Update the data-race detector for an atomic fence on the current thread.
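    ///
    /// As an illustration of what the `SeqCst` handling below guarantees, consider the classic
    /// store-buffering litmus test (sketch; `X` and `Y` are atomics initialized to 0): with both
    /// fences present, the outcome `r1 == 0 && r2 == 0` is forbidden.
    ///
    /// ```ignore
    /// // Thread 1                       // Thread 2
    /// X.store(1, Relaxed);              Y.store(1, Relaxed);
    /// fence(SeqCst);                    fence(SeqCst);
    /// let r1 = Y.load(Relaxed);         let r2 = X.load(Relaxed);
    /// ```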
1851    fn atomic_fence<'tcx>(
1852        &self,
1853        machine: &MiriMachine<'tcx>,
1854        atomic: AtomicFenceOrd,
1855    ) -> InterpResult<'tcx> {
1856        let current_span = machine.current_user_relevant_span();
1857        self.maybe_perform_sync_operation(&machine.threads, current_span, |index, mut clocks| {
1858            trace!("Atomic fence on {:?} with ordering {:?}", index, atomic);
1859
1860            // Apply data-race detection for the current fences;
1861            // this treats AcqRel and SeqCst the same as an acquire
1862            // and release fence applied in the same timestamp.
1863            if atomic != AtomicFenceOrd::Release {
1864                // Either Acquire | AcqRel | SeqCst
1865                clocks.apply_acquire_fence();
1866            }
1867            if atomic == AtomicFenceOrd::SeqCst {
1868                // Behave like an RMW on the global fence location. This takes full care of
1869                // all the SC fence requirements, including C++17 §32.4 [atomics.order]
1870                // paragraph 6 (which would limit what future reads can see). It also rules
1871                // out many legal behaviors, but we don't currently have a model that would
1872                // be more precise.
1873                // Also see the second bullet on page 10 of
1874                // <https://www.cs.tau.ac.il/~orilahav/papers/popl21_robustness.pdf>.
1875                let mut sc_fence_clock = self.last_sc_fence.borrow_mut();
1876                sc_fence_clock.join(&clocks.clock);
1877                clocks.clock.join(&sc_fence_clock);
1878                // Also establish some sort of order with the last SC write that happened, globally
1879                // (but this is only respected by future reads).
1880                clocks.write_seqcst.join(&self.last_sc_write_per_thread.borrow());
1881            }
1882            // The release fence is last, since both of the above could alter our clock,
1883            // which should be part of what is being released.
1884            if atomic != AtomicFenceOrd::Acquire {
1885                // Either Release | AcqRel | SeqCst
1886                clocks.apply_release_fence();
1887            }
1888
1889            // Increment timestamp in case of release semantics.
1890            interp_ok(atomic != AtomicFenceOrd::Acquire)
1891        })
1892    }
1893
1894    /// Attempt to perform a synchronized operation; this
1895    /// is a no-op if multi-threading is
1896    /// not currently enabled.
1897    /// Otherwise it runs the operation and, if the operation
1898    /// reports release semantics, increments the clock for the
1899    /// current vector afterwards, so later events are distinguished
1900    /// from any happens-before edges the operation may create.
1901    fn maybe_perform_sync_operation<'tcx>(
1902        &self,
1903        thread_mgr: &ThreadManager<'_>,
1904        current_span: Span,
1905        op: impl FnOnce(VectorIdx, RefMut<'_, ThreadClockSet>) -> InterpResult<'tcx, bool>,
1906    ) -> InterpResult<'tcx> {
1907        if self.multi_threaded.get() {
1908            let (index, clocks) = self.active_thread_state_mut(thread_mgr);
1909            if op(index, clocks)? {
1910                let (_, mut clocks) = self.active_thread_state_mut(thread_mgr);
1911                clocks.increment_clock(index, current_span);
1912            }
1913        }
1914        interp_ok(())
1915    }
1916
1917    /// Internal utility to identify a thread stored internally:
1918    /// returns a human-readable description of the thread for better diagnostics.
1919    fn print_thread_metadata(&self, thread_mgr: &ThreadManager<'_>, vector: VectorIdx) -> String {
1920        let thread = self.vector_info.borrow()[vector];
1921        let thread_name = thread_mgr.get_thread_display_name(thread);
1922        format!("thread `{thread_name}`")
1923    }
1924
1925    /// Acquire the given clock into the current thread, establishing synchronization with
1926    /// the moment when that clock snapshot was taken via `release_clock`.
1927    /// As this is an acquire operation, the thread timestamp is not
1928    /// incremented.
1929    pub fn acquire_clock<'tcx>(&self, clock: &VClock, threads: &ThreadManager<'tcx>) {
1930        let thread = threads.active_thread();
1931        let (_, mut clocks) = self.thread_state_mut(thread);
1932        clocks.clock.join(clock);
1933    }
1934
1935    /// Calls the given closure with the "release" clock of the current thread.
1936    /// Other threads can acquire this clock in the future to establish synchronization
1937    /// with this program point.
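    ///
    /// A minimal sketch of the intended pairing with [`GlobalState::acquire_clock`]
    /// (`mutex.sync_clock` is a hypothetical field standing in for whatever clock a
    /// synchronization object stores):
    ///
    /// ```ignore
    /// // releasing side, e.g. an unlock operation:
    /// mutex.sync_clock = data_race.release_clock(&machine.threads, |clock| clock.clone());
    /// // acquiring side, e.g. a later lock operation on another thread:
    /// data_race.acquire_clock(&mutex.sync_clock, &machine.threads);
    /// ```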
1938    pub fn release_clock<'tcx, R>(
1939        &self,
1940        threads: &ThreadManager<'tcx>,
1941        callback: impl FnOnce(&VClock) -> R,
1942    ) -> R {
1943        let thread = threads.active_thread();
1944        let span = threads.active_thread_ref().current_user_relevant_span();
1945        let (index, mut clocks) = self.thread_state_mut(thread);
1946        let r = callback(&clocks.clock);
1947        // Increment the clock, so that all following events cannot be confused with anything that
1948        // occurred before the release. Crucially, the callback is invoked on the *old* clock!
1949        clocks.increment_clock(index, span);
1950
1951        r
1952    }
1953
1954    fn thread_index(&self, thread: ThreadId) -> VectorIdx {
1955        self.thread_info.borrow()[thread].vector_index.expect("thread has no assigned vector")
1956    }
1957
1958    /// Load the vector index used by the given thread as well as the set of vector clocks
1959    /// used by the thread.
1960    #[inline]
1961    fn thread_state_mut(&self, thread: ThreadId) -> (VectorIdx, RefMut<'_, ThreadClockSet>) {
1962        let index = self.thread_index(thread);
1963        let ref_vector = self.vector_clocks.borrow_mut();
1964        let clocks = RefMut::map(ref_vector, |vec| &mut vec[index]);
1965        (index, clocks)
1966    }
1967
1968    /// Load the vector index used by the given thread as well as the set of vector clocks
1969    /// used by the thread.
1970    #[inline]
1971    fn thread_state(&self, thread: ThreadId) -> (VectorIdx, Ref<'_, ThreadClockSet>) {
1972        let index = self.thread_index(thread);
1973        let ref_vector = self.vector_clocks.borrow();
1974        let clocks = Ref::map(ref_vector, |vec| &vec[index]);
1975        (index, clocks)
1976    }
1977
1978    /// Load the current vector clock in use and the current set of thread clocks
1979    /// in use for the vector.
1980    #[inline]
1981    pub(super) fn active_thread_state(
1982        &self,
1983        thread_mgr: &ThreadManager<'_>,
1984    ) -> (VectorIdx, Ref<'_, ThreadClockSet>) {
1985        self.thread_state(thread_mgr.active_thread())
1986    }
1987
1988    /// Load the current vector clock in use and the current set of thread clocks
1989    /// in use for the vector mutably for modification.
1990    #[inline]
1991    pub(super) fn active_thread_state_mut(
1992        &self,
1993        thread_mgr: &ThreadManager<'_>,
1994    ) -> (VectorIdx, RefMut<'_, ThreadClockSet>) {
1995        self.thread_state_mut(thread_mgr.active_thread())
1996    }
1997
1998    /// Return the vector index of the current thread; this should be the same
1999    /// as the data-race active thread.
2000    #[inline]
2001    fn active_thread_index(&self, thread_mgr: &ThreadManager<'_>) -> VectorIdx {
2002        let active_thread_id = thread_mgr.active_thread();
2003        self.thread_index(active_thread_id)
2004    }
2005
2006    // SC ATOMIC STORE rule in the paper.
2007    pub(super) fn sc_write(&self, thread_mgr: &ThreadManager<'_>) {
2008        let (index, clocks) = self.active_thread_state(thread_mgr);
2009        self.last_sc_write_per_thread.borrow_mut().set_at_index(&clocks.clock, index);
2010    }
2011
2012    // SC ATOMIC READ rule in the paper.
2013    pub(super) fn sc_read(&self, thread_mgr: &ThreadManager<'_>) {
2014        let (.., mut clocks) = self.active_thread_state_mut(thread_mgr);
2015        clocks.read_seqcst.join(&self.last_sc_fence.borrow());
2016    }
2017}