1use std::cell::{Cell, Ref, RefCell, RefMut};
44use std::fmt::Debug;
45use std::mem;
46
47use rustc_abi::{Align, HasDataLayout, Size};
48use rustc_ast::Mutability;
49use rustc_data_structures::fx::{FxHashMap, FxHashSet};
50use rustc_index::{Idx, IndexVec};
51use rustc_log::tracing;
52use rustc_middle::mir;
53use rustc_middle::ty::Ty;
54use rustc_span::Span;
55
56use super::vector_clock::{VClock, VTimestamp, VectorIdx};
57use super::weak_memory::EvalContextExt as _;
58use crate::concurrency::GlobalDataRaceHandler;
59use crate::diagnostics::RacingOp;
60use crate::intrinsics::AtomicRmwOp;
61use crate::*;
62
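/// Data-race detection state attached to each allocation: per-byte vector clocks,
/// stored deduplicated over byte ranges that share the same state.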
63pub type AllocState = VClockAlloc;
64
65#[derive(Copy, Clone, PartialEq, Eq, Debug)]
67pub enum AtomicRwOrd {
68 Relaxed,
69 Acquire,
70 Release,
71 AcqRel,
72 SeqCst,
73}
74
75#[derive(Copy, Clone, PartialEq, Eq, Debug)]
77pub enum AtomicReadOrd {
78 Relaxed,
79 Acquire,
80 SeqCst,
81}
82
83#[derive(Copy, Clone, PartialEq, Eq, Debug)]
85pub enum AtomicWriteOrd {
86 Relaxed,
87 Release,
88 SeqCst,
89}
90
91#[derive(Copy, Clone, PartialEq, Eq, Debug)]
93pub enum AtomicFenceOrd {
94 Acquire,
95 Release,
96 AcqRel,
97 SeqCst,
98}
99
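/// Per-thread state for the data-race detector: the thread's own vector clock plus
/// the clocks buffered for release/acquire fences and SeqCst accesses.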
100#[derive(Clone, Default, Debug)]
104pub(super) struct ThreadClockSet {
105 pub(super) clock: VClock,
108
109 fence_acquire: VClock,
112
113 fence_release: VClock,
116
117 pub(super) write_seqcst: VClock,
122
123 pub(super) read_seqcst: VClock,
128}
129
130impl ThreadClockSet {
131 #[inline]
134 fn apply_release_fence(&mut self) {
135 self.fence_release.clone_from(&self.clock);
136 }
137
138 #[inline]
141 fn apply_acquire_fence(&mut self) {
142 self.clock.join(&self.fence_acquire);
143 }
144
145 #[inline]
148 fn increment_clock(&mut self, index: VectorIdx, current_span: Span) {
149 self.clock.increment_index(index, current_span);
150 }
151
152 fn join_with(&mut self, other: &ThreadClockSet) {
156 self.clock.join(&other.clock);
157 }
158}
159
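/// Error returned by the per-cell detection routines when a data race is found;
/// callers turn it into a full diagnostic via `VClockAlloc::report_data_race`.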
160#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
163pub struct DataRace;
164
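/// Clocks kept for a memory cell once it has been accessed atomically: the clocks of
/// atomic reads and writes (used to detect races with non-atomic and mixed-size
/// accesses), the synchronization clock acquired by acquire loads, and the access
/// size (`None` once differently-sized reads have been permitted).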
165#[derive(Clone, PartialEq, Eq, Debug)]
170struct AtomicMemoryCellClocks {
171 read_vector: VClock,
176
177 write_vector: VClock,
182
183 sync_vector: VClock,
191
192 size: Option<Size>,
197}
198
199#[derive(Copy, Clone, PartialEq, Eq, Debug)]
200enum AtomicAccessType {
201 Load(AtomicReadOrd),
202 Store,
203 Rmw,
204}
205
206#[derive(Copy, Clone, PartialEq, Eq, Debug)]
208pub enum NaReadType {
209 Read,
211
212 Retag,
214}
215
216impl NaReadType {
217 fn description(self) -> &'static str {
218 match self {
219 NaReadType::Read => "non-atomic read",
220 NaReadType::Retag => "retag read",
221 }
222 }
223}
224
225#[derive(Copy, Clone, PartialEq, Eq, Debug)]
228pub enum NaWriteType {
229 Allocate,
231
232 Write,
234
235 Retag,
237
238 Deallocate,
243}
244
245impl NaWriteType {
246 fn description(self) -> &'static str {
247 match self {
248 NaWriteType::Allocate => "creating a new allocation",
249 NaWriteType::Write => "non-atomic write",
250 NaWriteType::Retag => "retag write",
251 NaWriteType::Deallocate => "deallocation",
252 }
253 }
254}
255
256#[derive(Copy, Clone, PartialEq, Eq, Debug)]
257enum AccessType {
258 NaRead(NaReadType),
259 NaWrite(NaWriteType),
260 AtomicLoad,
261 AtomicStore,
262 AtomicRmw,
263}
264
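/// Race-detection clocks for one byte (or deduplicated range of bytes): the timestamp
/// and kind of the last non-atomic write, the clock of non-atomic reads since that
/// write, and lazily allocated atomic clocks for locations that are used atomically.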
265#[derive(Clone, PartialEq, Eq, Debug)]
267struct MemoryCellClocks {
268 write: (VectorIdx, VTimestamp),
272
273 write_type: NaWriteType,
277
278 read: VClock,
282
283 atomic_ops: Option<Box<AtomicMemoryCellClocks>>,
287}
288
289#[derive(Debug, Clone, Default)]
291struct ThreadExtraState {
292 vector_index: Option<VectorIdx>,
298
299 termination_vector_clock: Option<VClock>,
304}
305
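/// Global state of the vector-clock data-race detector: one `ThreadClockSet` per
/// vector index, the mapping between threads and vector indices (indices of
/// terminated threads can be reused), the clocks required for SeqCst fences and
/// writes, and the configuration flags for weak-memory emulation.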
306#[derive(Debug, Clone)]
311pub struct GlobalState {
312 multi_threaded: Cell<bool>,
319
320 ongoing_action_data_race_free: Cell<bool>,
324
325 vector_clocks: RefCell<IndexVec<VectorIdx, ThreadClockSet>>,
329
330 vector_info: RefCell<IndexVec<VectorIdx, ThreadId>>,
334
335 thread_info: RefCell<IndexVec<ThreadId, ThreadExtraState>>,
337
338 reuse_candidates: RefCell<FxHashSet<VectorIdx>>,
346
347 last_sc_fence: RefCell<VClock>,
350
351 last_sc_write_per_thread: RefCell<VClock>,
354
355 pub track_outdated_loads: bool,
357
358 pub weak_memory: bool,
360}
361
362impl VisitProvenance for GlobalState {
363 fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {
364 }
366}
367
368impl AccessType {
369 fn description(self, ty: Option<Ty<'_>>, size: Option<Size>) -> String {
370 let mut msg = String::new();
371
372 if let Some(size) = size {
373 if size == Size::ZERO {
374 assert!(self == AccessType::AtomicLoad);
378 assert!(ty.is_none());
379 return format!("multiple differently-sized atomic loads, including one load");
380 }
            msg.push_str(&format!("{}-byte ", size.bytes()))
382 }
383
384 msg.push_str(match self {
385 AccessType::NaRead(w) => w.description(),
386 AccessType::NaWrite(w) => w.description(),
387 AccessType::AtomicLoad => "atomic load",
388 AccessType::AtomicStore => "atomic store",
389 AccessType::AtomicRmw => "atomic read-modify-write",
390 });
391
392 if let Some(ty) = ty {
393 msg.push_str(&format!(" of type `{ty}`"));
394 }
395
396 msg
397 }
398
399 fn is_atomic(self) -> bool {
400 match self {
401 AccessType::AtomicLoad | AccessType::AtomicStore | AccessType::AtomicRmw => true,
402 AccessType::NaRead(_) | AccessType::NaWrite(_) => false,
403 }
404 }
405
406 fn is_read(self) -> bool {
407 match self {
408 AccessType::AtomicLoad | AccessType::NaRead(_) => true,
409 AccessType::NaWrite(_) | AccessType::AtomicStore | AccessType::AtomicRmw => false,
410 }
411 }
412
413 fn is_retag(self) -> bool {
414 matches!(
415 self,
416 AccessType::NaRead(NaReadType::Retag) | AccessType::NaWrite(NaWriteType::Retag)
417 )
418 }
419}
420
421impl AtomicMemoryCellClocks {
422 fn new(size: Size) -> Self {
423 AtomicMemoryCellClocks {
424 read_vector: Default::default(),
425 write_vector: Default::default(),
426 sync_vector: Default::default(),
427 size: Some(size),
428 }
429 }
430}
431
432impl MemoryCellClocks {
433 fn new(alloc: VTimestamp, alloc_index: VectorIdx) -> Self {
436 MemoryCellClocks {
437 read: VClock::default(),
438 write: (alloc_index, alloc),
439 write_type: NaWriteType::Allocate,
440 atomic_ops: None,
441 }
442 }
443
444 #[inline]
445 fn write_was_before(&self, other: &VClock) -> bool {
446 self.write.1 <= other[self.write.0]
449 }
450
451 #[inline]
452 fn write(&self) -> VClock {
453 VClock::new_with_index(self.write.0, self.write.1)
454 }
455
456 #[inline]
458 fn atomic(&self) -> Option<&AtomicMemoryCellClocks> {
459 self.atomic_ops.as_deref()
460 }
461
462 #[inline]
464 fn atomic_mut_unwrap(&mut self) -> &mut AtomicMemoryCellClocks {
465 self.atomic_ops.as_deref_mut().unwrap()
466 }
467
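    /// Get the atomic clocks for an access of the given size, creating them on first
    /// use. Changing the access size is only permitted when the earlier
    /// differently-sized accesses happened-before the current thread; a read may
    /// additionally tolerate earlier reads of other sizes, which is recorded by
    /// setting the size to `None`. Otherwise this is a mixed-size race.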
468 fn atomic_access(
471 &mut self,
472 thread_clocks: &ThreadClockSet,
473 size: Size,
474 write: bool,
475 ) -> Result<&mut AtomicMemoryCellClocks, DataRace> {
476 match self.atomic_ops {
477 Some(ref mut atomic) => {
478 if atomic.size == Some(size) {
480 Ok(atomic)
481 } else if atomic.read_vector <= thread_clocks.clock
482 && atomic.write_vector <= thread_clocks.clock
483 {
484 atomic.size = Some(size);
486 Ok(atomic)
487 } else if !write && atomic.write_vector <= thread_clocks.clock {
488 atomic.size = None;
491 Ok(atomic)
492 } else {
493 Err(DataRace)
494 }
495 }
496 None => {
497 self.atomic_ops = Some(Box::new(AtomicMemoryCellClocks::new(size)));
498 Ok(self.atomic_ops.as_mut().unwrap())
499 }
500 }
501 }
502
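    /// Detect races for an atomic acquire load of this cell and, on success, join the
    /// location's synchronization clock (`sync_clock` if provided, otherwise the
    /// cell's own `sync_vector`) into the thread's clock, establishing happens-before
    /// with the release store that published it.
    ///
    /// Illustrative sketch only of the release/acquire message-passing pattern this
    /// models for interpreted programs:
    ///
    /// ```ignore
    /// // thread 1:                      // thread 2:
    /// DATA.store(42, Relaxed);          while FLAG.load(Acquire) == 0 {}
    /// FLAG.store(1, Release);           assert_eq!(DATA.load(Relaxed), 42); // no race
    /// ```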
503 fn load_acquire(
507 &mut self,
508 thread_clocks: &mut ThreadClockSet,
509 index: VectorIdx,
510 access_size: Size,
511 sync_clock: Option<&VClock>,
512 ) -> Result<(), DataRace> {
513 self.atomic_read_detect(thread_clocks, index, access_size)?;
514 if let Some(sync_clock) = sync_clock.or_else(|| self.atomic().map(|a| &a.sync_vector)) {
515 thread_clocks.clock.join(sync_clock);
516 }
517 Ok(())
518 }
519
520 fn load_relaxed(
524 &mut self,
525 thread_clocks: &mut ThreadClockSet,
526 index: VectorIdx,
527 access_size: Size,
528 sync_clock: Option<&VClock>,
529 ) -> Result<(), DataRace> {
530 self.atomic_read_detect(thread_clocks, index, access_size)?;
531 if let Some(sync_clock) = sync_clock.or_else(|| self.atomic().map(|a| &a.sync_vector)) {
532 thread_clocks.fence_acquire.join(sync_clock);
533 }
534 Ok(())
535 }
536
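    /// Detect races for an atomic release store to this cell and publish the thread's
    /// current clock as the cell's synchronization clock, so that an acquire load
    /// reading this store acquires everything that happened-before it.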
537 fn store_release(
540 &mut self,
541 thread_clocks: &ThreadClockSet,
542 index: VectorIdx,
543 access_size: Size,
544 ) -> Result<(), DataRace> {
545 self.atomic_write_detect(thread_clocks, index, access_size)?;
        let atomic = self.atomic_mut_unwrap();
        atomic.sync_vector.clone_from(&thread_clocks.clock);
548 Ok(())
549 }
550
551 fn store_relaxed(
554 &mut self,
555 thread_clocks: &ThreadClockSet,
556 index: VectorIdx,
557 access_size: Size,
558 ) -> Result<(), DataRace> {
559 self.atomic_write_detect(thread_clocks, index, access_size)?;
560
561 let atomic = self.atomic_mut_unwrap();
567 atomic.sync_vector.clone_from(&thread_clocks.fence_release);
568 Ok(())
569 }
570
571 fn rmw_release(
574 &mut self,
575 thread_clocks: &ThreadClockSet,
576 index: VectorIdx,
577 access_size: Size,
578 ) -> Result<(), DataRace> {
579 self.atomic_write_detect(thread_clocks, index, access_size)?;
580 let atomic = self.atomic_mut_unwrap();
581 atomic.sync_vector.join(&thread_clocks.clock);
584 Ok(())
585 }
586
587 fn rmw_relaxed(
590 &mut self,
591 thread_clocks: &ThreadClockSet,
592 index: VectorIdx,
593 access_size: Size,
594 ) -> Result<(), DataRace> {
595 self.atomic_write_detect(thread_clocks, index, access_size)?;
596 let atomic = self.atomic_mut_unwrap();
597 atomic.sync_vector.join(&thread_clocks.fence_release);
600 Ok(())
601 }
602
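    /// Record an atomic read in this cell's atomic clocks and report a race if the
    /// last non-atomic write did not happen-before the reading thread.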
603 fn atomic_read_detect(
606 &mut self,
607 thread_clocks: &ThreadClockSet,
608 index: VectorIdx,
609 access_size: Size,
610 ) -> Result<(), DataRace> {
611 trace!("Atomic read with vectors: {:#?} :: {:#?}", self, thread_clocks);
612 let atomic = self.atomic_access(thread_clocks, access_size, false)?;
613 atomic.read_vector.set_at_index(&thread_clocks.clock, index);
614 if self.write_was_before(&thread_clocks.clock) { Ok(()) } else { Err(DataRace) }
616 }
617
618 fn atomic_write_detect(
621 &mut self,
622 thread_clocks: &ThreadClockSet,
623 index: VectorIdx,
624 access_size: Size,
625 ) -> Result<(), DataRace> {
626 trace!("Atomic write with vectors: {:#?} :: {:#?}", self, thread_clocks);
627 let atomic = self.atomic_access(thread_clocks, access_size, true)?;
628 atomic.write_vector.set_at_index(&thread_clocks.clock, index);
629 if self.write_was_before(&thread_clocks.clock) && self.read <= thread_clocks.clock {
631 Ok(())
632 } else {
633 Err(DataRace)
634 }
635 }
636
637 fn read_race_detect(
640 &mut self,
641 thread_clocks: &mut ThreadClockSet,
642 index: VectorIdx,
643 read_type: NaReadType,
644 current_span: Span,
645 ) -> Result<(), DataRace> {
646 trace!("Unsynchronized read with vectors: {:#?} :: {:#?}", self, thread_clocks);
647 if !current_span.is_dummy() {
648 thread_clocks.clock.index_mut(index).span = current_span;
649 }
650 thread_clocks.clock.index_mut(index).set_read_type(read_type);
651 if !self.write_was_before(&thread_clocks.clock) {
653 return Err(DataRace);
654 }
655 if !self.atomic().is_none_or(|atomic| atomic.write_vector <= thread_clocks.clock) {
657 return Err(DataRace);
658 }
659 self.read.set_at_index(&thread_clocks.clock, index);
661 Ok(())
662 }
663
664 fn write_race_detect(
667 &mut self,
668 thread_clocks: &mut ThreadClockSet,
669 index: VectorIdx,
670 write_type: NaWriteType,
671 current_span: Span,
672 ) -> Result<(), DataRace> {
673 trace!("Unsynchronized write with vectors: {:#?} :: {:#?}", self, thread_clocks);
674 if !current_span.is_dummy() {
675 thread_clocks.clock.index_mut(index).span = current_span;
676 }
677 if !(self.write_was_before(&thread_clocks.clock) && self.read <= thread_clocks.clock) {
679 return Err(DataRace);
680 }
681 if !self.atomic().is_none_or(|atomic| {
683 atomic.write_vector <= thread_clocks.clock && atomic.read_vector <= thread_clocks.clock
684 }) {
685 return Err(DataRace);
686 }
687 self.write = (index, thread_clocks.clock[index]);
689 self.write_type = write_type;
690 self.read.set_zero_vector();
691 self.atomic_ops = None;
693 Ok(())
694 }
695}
696
697impl GlobalDataRaceHandler {
698 fn set_ongoing_action_data_race_free(&self, enable: bool) {
701 match self {
702 GlobalDataRaceHandler::None => {}
703 GlobalDataRaceHandler::Vclocks(data_race) => {
704 let old = data_race.ongoing_action_data_race_free.replace(enable);
705 assert_ne!(old, enable, "cannot nest allow_data_races");
706 }
707 GlobalDataRaceHandler::Genmc(genmc_ctx) => {
708 genmc_ctx.set_ongoing_action_data_race_free(enable);
709 }
710 }
711 }
712}
713
714impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx> {}
716pub trait EvalContextExt<'tcx>: MiriInterpCxExt<'tcx> {
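    /// Perform an atomic read of the given place: checks alignment and the rules for
    /// atomics in read-only memory, defers to GenMC when that backend is enabled, and
    /// otherwise routes the load through the weak-memory store buffer while
    /// validating it against the data-race clocks.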
717 fn read_scalar_atomic(
719 &self,
720 place: &MPlaceTy<'tcx>,
721 atomic: AtomicReadOrd,
722 ) -> InterpResult<'tcx, Scalar> {
723 let this = self.eval_context_ref();
724 this.atomic_access_check(place, AtomicAccessType::Load(atomic))?;
725 if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
732 let old_val = this.run_for_validation_ref(|this| this.read_scalar(place)).discard_err();
733 return genmc_ctx.atomic_load(
734 this,
735 place.ptr().addr(),
736 place.layout.size,
737 atomic,
738 old_val,
739 );
740 }
741
742 let scalar = this.allow_data_races_ref(move |this| this.read_scalar(place))?;
743 let buffered_scalar = this.buffered_atomic_read(place, atomic, scalar, |sync_clock| {
744 this.validate_atomic_load(place, atomic, sync_clock)
745 })?;
746 interp_ok(buffered_scalar.ok_or_else(|| err_ub!(InvalidUninitBytes(None)))?)
747 }
748
749 fn write_scalar_atomic(
751 &mut self,
752 val: Scalar,
753 dest: &MPlaceTy<'tcx>,
754 atomic: AtomicWriteOrd,
755 ) -> InterpResult<'tcx> {
756 let this = self.eval_context_mut();
757 this.atomic_access_check(dest, AtomicAccessType::Store)?;
758
759 if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
761 let old_val = this.run_for_validation_ref(|this| this.read_scalar(dest)).discard_err();
762 if genmc_ctx.atomic_store(
763 this,
764 dest.ptr().addr(),
765 dest.layout.size,
766 val,
767 old_val,
768 atomic,
769 )? {
770 this.allow_data_races_mut(|this| this.write_scalar(val, dest))?;
773 }
774 return interp_ok(());
775 }
776
777 let old_val = this.get_latest_nonatomic_val(dest);
779 this.allow_data_races_mut(move |this| this.write_scalar(val, dest))?;
780 this.validate_atomic_store(dest, atomic)?;
781 this.buffered_atomic_write(val, dest, atomic, old_val)
782 }
783
784 fn atomic_rmw_op_immediate(
786 &mut self,
787 place: &MPlaceTy<'tcx>,
788 rhs: &ImmTy<'tcx>,
789 atomic_op: AtomicRmwOp,
790 ord: AtomicRwOrd,
791 ) -> InterpResult<'tcx, ImmTy<'tcx>> {
792 let this = self.eval_context_mut();
793 this.atomic_access_check(place, AtomicAccessType::Rmw)?;
794
795 let old = this.allow_data_races_mut(|this| this.read_immediate(place))?;
796
797 if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
799 let (old_val, new_val) = genmc_ctx.atomic_rmw_op(
800 this,
801 place.ptr().addr(),
802 place.layout.size,
803 atomic_op,
804 place.layout.backend_repr.is_signed(),
805 ord,
806 rhs.to_scalar(),
807 old.to_scalar(),
808 )?;
809 if let Some(new_val) = new_val {
810 this.allow_data_races_mut(|this| this.write_scalar(new_val, place))?;
811 }
812 return interp_ok(ImmTy::from_scalar(old_val, old.layout));
813 }
814
815 let val = match atomic_op {
816 AtomicRmwOp::MirOp { op, neg } => {
817 let val = this.binary_op(op, &old, rhs)?;
818 if neg { this.unary_op(mir::UnOp::Not, &val)? } else { val }
819 }
820 AtomicRmwOp::Max => {
821 let lt = this.binary_op(mir::BinOp::Lt, &old, rhs)?.to_scalar().to_bool()?;
822 if lt { rhs } else { &old }.clone()
823 }
824 AtomicRmwOp::Min => {
825 let lt = this.binary_op(mir::BinOp::Lt, &old, rhs)?.to_scalar().to_bool()?;
826 if lt { &old } else { rhs }.clone()
827 }
828 };
829
830 this.allow_data_races_mut(|this| this.write_immediate(*val, place))?;
831
832 this.validate_atomic_rmw(place, ord)?;
833
834 this.buffered_atomic_rmw(val.to_scalar(), place, ord, old.to_scalar())?;
835 interp_ok(old)
836 }
837
838 fn atomic_exchange_scalar(
841 &mut self,
842 place: &MPlaceTy<'tcx>,
843 new: Scalar,
844 atomic: AtomicRwOrd,
845 ) -> InterpResult<'tcx, Scalar> {
846 let this = self.eval_context_mut();
847 this.atomic_access_check(place, AtomicAccessType::Rmw)?;
848
849 let old = this.allow_data_races_mut(|this| this.read_scalar(place))?;
850 this.allow_data_races_mut(|this| this.write_scalar(new, place))?;
851
852 if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
854 let (old_val, new_val) = genmc_ctx.atomic_exchange(
855 this,
856 place.ptr().addr(),
857 place.layout.size,
858 new,
859 atomic,
860 old,
861 )?;
862 if let Some(new_val) = new_val {
865 this.allow_data_races_mut(|this| this.write_scalar(new_val, place))?;
866 }
867 return interp_ok(old_val);
868 }
869
870 this.validate_atomic_rmw(place, atomic)?;
871
872 this.buffered_atomic_rmw(new, place, atomic, old)?;
873 interp_ok(old)
874 }
875
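    /// Perform an atomic compare-and-exchange at `place`, returning the old value and
    /// a boolean indicating whether the exchange took place. On success the access is
    /// a read-modify-write with the `success` ordering; on failure only a load with
    /// the `fail` ordering is performed. `can_fail_spuriously` selects the weak
    /// variant, whose spurious failures are driven by the machine's configured
    /// failure rate.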
876 fn atomic_compare_exchange_scalar(
883 &mut self,
884 place: &MPlaceTy<'tcx>,
885 expect_old: &ImmTy<'tcx>,
886 new: Scalar,
887 success: AtomicRwOrd,
888 fail: AtomicReadOrd,
889 can_fail_spuriously: bool,
890 ) -> InterpResult<'tcx, Immediate<Provenance>> {
891 use rand::Rng as _;
892 let this = self.eval_context_mut();
893 this.atomic_access_check(place, AtomicAccessType::Rmw)?;
894
895 let old = this.allow_data_races_mut(|this| this.read_immediate(place))?;
897
898 if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
900 let (old_value, new_value, cmpxchg_success) = genmc_ctx.atomic_compare_exchange(
901 this,
902 place.ptr().addr(),
903 place.layout.size,
904 this.read_scalar(expect_old)?,
905 new,
906 success,
907 fail,
908 can_fail_spuriously,
909 old.to_scalar(),
910 )?;
911 if let Some(new_value) = new_value {
914 this.allow_data_races_mut(|this| this.write_scalar(new_value, place))?;
915 }
916 return interp_ok(Immediate::ScalarPair(old_value, Scalar::from_bool(cmpxchg_success)));
917 }
918
919 let eq = this.binary_op(mir::BinOp::Eq, &old, expect_old)?;
921 let success_rate = 1.0 - this.machine.cmpxchg_weak_failure_rate;
924 let cmpxchg_success = eq.to_scalar().to_bool()?
925 && if can_fail_spuriously {
926 this.machine.rng.get_mut().random_bool(success_rate)
927 } else {
928 true
929 };
930 let res = Immediate::ScalarPair(old.to_scalar(), Scalar::from_bool(cmpxchg_success));
931
932 if cmpxchg_success {
936 this.allow_data_races_mut(|this| this.write_scalar(new, place))?;
937 this.validate_atomic_rmw(place, success)?;
938 this.buffered_atomic_rmw(new, place, success, old.to_scalar())?;
939 } else {
940 this.validate_atomic_load(place, fail, None)?;
941 this.perform_read_on_buffered_latest(place, fail)?;
946 }
947
948 interp_ok(res)
950 }
951
952 fn atomic_fence(&mut self, atomic: AtomicFenceOrd) -> InterpResult<'tcx> {
954 let this = self.eval_context_mut();
955 let machine = &this.machine;
956 match &this.machine.data_race {
957 GlobalDataRaceHandler::None => interp_ok(()),
958 GlobalDataRaceHandler::Vclocks(data_race) => data_race.atomic_fence(machine, atomic),
959 GlobalDataRaceHandler::Genmc(genmc_ctx) => genmc_ctx.atomic_fence(machine, atomic),
960 }
961 }
962
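    /// Calls `callback` with the "release" clock of the current thread: everything
    /// happening-before this point may be released to a thread that later acquires
    /// the clock. Afterwards the thread's own clock is incremented so the released
    /// state stays distinguishable from later events. Returns `None` when data-race
    /// detection is disabled and errors in GenMC mode, where this form of
    /// synchronization is not supported.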
963 fn release_clock<R>(
969 &self,
970 callback: impl FnOnce(&VClock) -> R,
971 ) -> InterpResult<'tcx, Option<R>> {
972 let this = self.eval_context_ref();
973 interp_ok(match &this.machine.data_race {
974 GlobalDataRaceHandler::None => None,
975 GlobalDataRaceHandler::Genmc(_genmc_ctx) =>
976 throw_unsup_format!(
977 "this operation performs synchronization that is not supported in GenMC mode"
978 ),
979 GlobalDataRaceHandler::Vclocks(data_race) =>
980 Some(data_race.release_clock(&this.machine.threads, callback)),
981 })
982 }
983
984 fn acquire_clock(&self, clock: &VClock) -> InterpResult<'tcx> {
987 let this = self.eval_context_ref();
988 match &this.machine.data_race {
989 GlobalDataRaceHandler::None => {}
990 GlobalDataRaceHandler::Genmc(_genmc_ctx) =>
991 throw_unsup_format!(
992 "this operation performs synchronization that is not supported in GenMC mode"
993 ),
994 GlobalDataRaceHandler::Vclocks(data_race) =>
995 data_race.acquire_clock(clock, &this.machine.threads),
996 }
997 interp_ok(())
998 }
999}
1000
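/// Race-detection state for a single allocation: a range map from byte offsets to
/// their `MemoryCellClocks`.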
1001#[derive(Debug, Clone)]
1003pub struct VClockAlloc {
1004 alloc_ranges: RefCell<DedupRangeMap<MemoryCellClocks>>,
1006}
1007
1008impl VisitProvenance for VClockAlloc {
1009 fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {
1010 }
1012}
1013
1014impl VClockAlloc {
1015 pub fn new_allocation(
1017 global: &GlobalState,
1018 thread_mgr: &ThreadManager<'_>,
1019 len: Size,
1020 kind: MemoryKind,
1021 current_span: Span,
1022 ) -> VClockAlloc {
1023 let (alloc_timestamp, alloc_index) = match kind {
1025 MemoryKind::Machine(
1027 MiriMemoryKind::Rust
1028 | MiriMemoryKind::Miri
1029 | MiriMemoryKind::C
1030 | MiriMemoryKind::WinHeap
1031 | MiriMemoryKind::WinLocal
1032 | MiriMemoryKind::Mmap,
1033 )
1034 | MemoryKind::Stack => {
1035 let (alloc_index, clocks) = global.active_thread_state(thread_mgr);
1036 let mut alloc_timestamp = clocks.clock[alloc_index];
1037 alloc_timestamp.span = current_span;
1038 (alloc_timestamp, alloc_index)
1039 }
1040 MemoryKind::Machine(
1043 MiriMemoryKind::Global
1044 | MiriMemoryKind::Machine
1045 | MiriMemoryKind::Runtime
1046 | MiriMemoryKind::ExternStatic
1047 | MiriMemoryKind::Tls,
1048 )
1049 | MemoryKind::CallerLocation =>
1050 (VTimestamp::ZERO, global.thread_index(ThreadId::MAIN_THREAD)),
1051 };
1052 VClockAlloc {
1053 alloc_ranges: RefCell::new(DedupRangeMap::new(
1054 len,
1055 MemoryCellClocks::new(alloc_timestamp, alloc_index),
1056 )),
1057 }
1058 }
1059
1060 fn find_gt_index(l: &VClock, r: &VClock) -> Option<VectorIdx> {
1063 trace!("Find index where not {:?} <= {:?}", l, r);
1064 let l_slice = l.as_slice();
1065 let r_slice = r.as_slice();
1066 l_slice
1067 .iter()
1068 .zip(r_slice.iter())
1069 .enumerate()
1070 .find_map(|(idx, (&l, &r))| if l > r { Some(idx) } else { None })
1071 .or_else(|| {
1072 if l_slice.len() > r_slice.len() {
1073 let l_remainder_slice = &l_slice[r_slice.len()..];
1078 let idx = l_remainder_slice
1079 .iter()
1080 .enumerate()
1081 .find_map(|(idx, &r)| if r == VTimestamp::ZERO { None } else { Some(idx) })
1082 .expect("Invalid VClock Invariant");
1083 Some(idx + r_slice.len())
1084 } else {
1085 None
1086 }
1087 })
1088 .map(VectorIdx::new)
1089 }
1090
1091 #[cold]
1098 #[inline(never)]
1099 fn report_data_race<'tcx>(
1100 global: &GlobalState,
1101 thread_mgr: &ThreadManager<'_>,
1102 mem_clocks: &MemoryCellClocks,
1103 access: AccessType,
1104 access_size: Size,
1105 ptr_dbg: interpret::Pointer<AllocId>,
1106 ty: Option<Ty<'_>>,
1107 ) -> InterpResult<'tcx> {
1108 let (active_index, active_clocks) = global.active_thread_state(thread_mgr);
1109 let mut other_size = None; let write_clock;
1111 let (other_access, other_thread, other_clock) =
1112 if !access.is_atomic() &&
1114 let Some(atomic) = mem_clocks.atomic() &&
1115 let Some(idx) = Self::find_gt_index(&atomic.write_vector, &active_clocks.clock)
1116 {
1117 (AccessType::AtomicStore, idx, &atomic.write_vector)
1118 } else if !access.is_atomic() &&
1119 let Some(atomic) = mem_clocks.atomic() &&
1120 let Some(idx) = Self::find_gt_index(&atomic.read_vector, &active_clocks.clock)
1121 {
1122 (AccessType::AtomicLoad, idx, &atomic.read_vector)
1123 } else if mem_clocks.write.1 > active_clocks.clock[mem_clocks.write.0] {
1125 write_clock = mem_clocks.write();
1126 (AccessType::NaWrite(mem_clocks.write_type), mem_clocks.write.0, &write_clock)
1127 } else if let Some(idx) = Self::find_gt_index(&mem_clocks.read, &active_clocks.clock) {
1128 (AccessType::NaRead(mem_clocks.read[idx].read_type()), idx, &mem_clocks.read)
1129 } else if access.is_atomic() && let Some(atomic) = mem_clocks.atomic() && atomic.size != Some(access_size) {
1131 other_size = Some(atomic.size.unwrap_or(Size::ZERO));
1134 if let Some(idx) = Self::find_gt_index(&atomic.write_vector, &active_clocks.clock)
1135 {
1136 (AccessType::AtomicStore, idx, &atomic.write_vector)
1137 } else if let Some(idx) =
1138 Self::find_gt_index(&atomic.read_vector, &active_clocks.clock)
1139 {
1140 (AccessType::AtomicLoad, idx, &atomic.read_vector)
1141 } else {
1142 unreachable!(
1143 "Failed to report data-race for mixed-size access: no race found"
1144 )
1145 }
1146 } else {
1147 unreachable!("Failed to report data-race")
1148 };
1149
1150 let active_thread_info = global.print_thread_metadata(thread_mgr, active_index);
1152 let other_thread_info = global.print_thread_metadata(thread_mgr, other_thread);
1153 let involves_non_atomic = !access.is_atomic() || !other_access.is_atomic();
1154
1155 let extra = if other_size.is_some() {
1157 assert!(!involves_non_atomic);
1158 Some("overlapping unsynchronized atomic accesses must use the same access size")
1159 } else if access.is_read() && other_access.is_read() {
1160 panic!("there should be no same-size read-read races")
1161 } else {
1162 None
1163 };
1164 Err(err_machine_stop!(TerminationInfo::DataRace {
1165 involves_non_atomic,
1166 extra,
1167 retag_explain: access.is_retag() || other_access.is_retag(),
1168 ptr: ptr_dbg,
1169 op1: RacingOp {
1170 action: other_access.description(None, other_size),
1171 thread_info: other_thread_info,
1172 span: other_clock.as_slice()[other_thread.index()].span_data(),
1173 },
1174 op2: RacingOp {
1175 action: access.description(ty, other_size.map(|_| access_size)),
1176 thread_info: active_thread_info,
1177 span: active_clocks.clock.as_slice()[active_index.index()].span_data(),
1178 },
1179 }))?
1180 }
1181
1182 pub(super) fn sync_clock(&self, access_range: AllocRange) -> VClock {
1184 let alloc_ranges = self.alloc_ranges.borrow();
1185 let mut clock = VClock::default();
1186 for (_, mem_clocks) in alloc_ranges.iter(access_range.start, access_range.size) {
1187 if let Some(atomic) = mem_clocks.atomic() {
1188 clock.join(&atomic.sync_vector);
1189 }
1190 }
1191 clock
1192 }
1193
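    /// Report a non-atomic read of the given range to the detector and raise a
    /// data-race error if it conflicts with a previous write or atomic access.
    /// Does nothing unless the detector is currently in race-detecting mode;
    /// `ty` is only used to improve the diagnostic.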
1194 pub fn read_non_atomic<'tcx>(
1201 &self,
1202 alloc_id: AllocId,
1203 access_range: AllocRange,
1204 read_type: NaReadType,
1205 ty: Option<Ty<'_>>,
1206 machine: &MiriMachine<'_>,
1207 ) -> InterpResult<'tcx> {
1208 let current_span = machine.current_user_relevant_span();
1209 let global = machine.data_race.as_vclocks_ref().unwrap();
1210 if !global.race_detecting() {
1211 return interp_ok(());
1212 }
1213 let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
1214 let mut alloc_ranges = self.alloc_ranges.borrow_mut();
1215 for (mem_clocks_range, mem_clocks) in
1216 alloc_ranges.iter_mut(access_range.start, access_range.size)
1217 {
1218 if let Err(DataRace) =
1219 mem_clocks.read_race_detect(&mut thread_clocks, index, read_type, current_span)
1220 {
1221 drop(thread_clocks);
1222 return Self::report_data_race(
1224 global,
1225 &machine.threads,
1226 mem_clocks,
1227 AccessType::NaRead(read_type),
1228 access_range.size,
1229 interpret::Pointer::new(alloc_id, Size::from_bytes(mem_clocks_range.start)),
1230 ty,
1231 );
1232 }
1233 }
1234 interp_ok(())
1235 }
1236
1237 pub fn write_non_atomic<'tcx>(
1243 &mut self,
1244 alloc_id: AllocId,
1245 access_range: AllocRange,
1246 write_type: NaWriteType,
1247 ty: Option<Ty<'_>>,
1248 machine: &mut MiriMachine<'_>,
1249 ) -> InterpResult<'tcx> {
1250 let current_span = machine.current_user_relevant_span();
1251 let global = machine.data_race.as_vclocks_mut().unwrap();
1252 if !global.race_detecting() {
1253 return interp_ok(());
1254 }
1255 let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
1256 for (mem_clocks_range, mem_clocks) in
1257 self.alloc_ranges.get_mut().iter_mut(access_range.start, access_range.size)
1258 {
1259 if let Err(DataRace) =
1260 mem_clocks.write_race_detect(&mut thread_clocks, index, write_type, current_span)
1261 {
1262 drop(thread_clocks);
1263 return Self::report_data_race(
1265 global,
1266 &machine.threads,
1267 mem_clocks,
1268 AccessType::NaWrite(write_type),
1269 access_range.size,
1270 interpret::Pointer::new(alloc_id, Size::from_bytes(mem_clocks_range.start)),
1271 ty,
1272 );
1273 }
1274 }
1275 interp_ok(())
1276 }
1277}
1278
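/// Data-race state attached to each stack frame. Locals that live entirely on the
/// stack can only be accessed by the thread that owns the frame, so their reads and
/// writes are tracked with plain timestamps here; `local_moved_to_memory` expands
/// them into full per-location clocks if the local is ever moved to memory.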
1279#[derive(Debug, Default)]
1282pub struct FrameState {
1283 local_clocks: RefCell<FxHashMap<mir::Local, LocalClocks>>,
1284}
1285
1286#[derive(Debug)]
1290struct LocalClocks {
1291 write: VTimestamp,
1292 write_type: NaWriteType,
1293 read: VTimestamp,
1294}
1295
1296impl Default for LocalClocks {
1297 fn default() -> Self {
1298 Self { write: VTimestamp::ZERO, write_type: NaWriteType::Allocate, read: VTimestamp::ZERO }
1299 }
1300}
1301
1302impl FrameState {
1303 pub fn local_write(&self, local: mir::Local, storage_live: bool, machine: &MiriMachine<'_>) {
1304 let current_span = machine.current_user_relevant_span();
1305 let global = machine.data_race.as_vclocks_ref().unwrap();
1306 if !global.race_detecting() {
1307 return;
1308 }
1309 let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
1310 if !current_span.is_dummy() {
1312 thread_clocks.clock.index_mut(index).span = current_span;
1313 }
1314 let mut clocks = self.local_clocks.borrow_mut();
1315 if storage_live {
1316 let new_clocks = LocalClocks {
1317 write: thread_clocks.clock[index],
1318 write_type: NaWriteType::Allocate,
1319 read: VTimestamp::ZERO,
1320 };
1321 clocks.insert(local, new_clocks);
1324 } else {
1325 let clocks = clocks.entry(local).or_default();
1328 clocks.write = thread_clocks.clock[index];
1329 clocks.write_type = NaWriteType::Write;
1330 }
1331 }
1332
1333 pub fn local_read(&self, local: mir::Local, machine: &MiriMachine<'_>) {
1334 let current_span = machine.current_user_relevant_span();
1335 let global = machine.data_race.as_vclocks_ref().unwrap();
1336 if !global.race_detecting() {
1337 return;
1338 }
1339 let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
1340 if !current_span.is_dummy() {
1342 thread_clocks.clock.index_mut(index).span = current_span;
1343 }
1344 thread_clocks.clock.index_mut(index).set_read_type(NaReadType::Read);
1345 let mut clocks = self.local_clocks.borrow_mut();
1348 let clocks = clocks.entry(local).or_default();
1349 clocks.read = thread_clocks.clock[index];
1350 }
1351
1352 pub fn local_moved_to_memory(
1353 &self,
1354 local: mir::Local,
1355 alloc: &mut VClockAlloc,
1356 machine: &MiriMachine<'_>,
1357 ) {
1358 let global = machine.data_race.as_vclocks_ref().unwrap();
1359 if !global.race_detecting() {
1360 return;
1361 }
1362 let (index, _thread_clocks) = global.active_thread_state_mut(&machine.threads);
1363 let local_clocks = self.local_clocks.borrow_mut().remove(&local).unwrap_or_default();
1367 for (_mem_clocks_range, mem_clocks) in alloc.alloc_ranges.get_mut().iter_mut_all() {
1368 assert_eq!(mem_clocks.write.0, index);
1371 mem_clocks.write = (index, local_clocks.write);
1373 mem_clocks.write_type = local_clocks.write_type;
1374 mem_clocks.read = VClock::new_with_index(index, local_clocks.read);
1375 }
1376 }
1377}
1378
1379impl<'tcx> EvalContextPrivExt<'tcx> for MiriInterpCx<'tcx> {}
1380trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> {
1381 #[inline]
1389 fn allow_data_races_ref<R>(&self, op: impl FnOnce(&MiriInterpCx<'tcx>) -> R) -> R {
1390 let this = self.eval_context_ref();
1391 this.machine.data_race.set_ongoing_action_data_race_free(true);
1392 let result = op(this);
1393 this.machine.data_race.set_ongoing_action_data_race_free(false);
1394 result
1395 }
1396
1397 #[inline]
1401 fn allow_data_races_mut<R>(&mut self, op: impl FnOnce(&mut MiriInterpCx<'tcx>) -> R) -> R {
1402 let this = self.eval_context_mut();
1403 this.machine.data_race.set_ongoing_action_data_race_free(true);
1404 let result = op(this);
1405 this.machine.data_race.set_ongoing_action_data_race_free(false);
1406 result
1407 }
1408
1409 fn atomic_access_check(
1411 &self,
1412 place: &MPlaceTy<'tcx>,
1413 access_type: AtomicAccessType,
1414 ) -> InterpResult<'tcx> {
1415 let this = self.eval_context_ref();
1416 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
1420 this.check_ptr_align(place.ptr(), align)?;
1421 let (alloc_id, _offset, _prov) = this
1429 .ptr_try_get_alloc_id(place.ptr(), 0)
1430 .expect("there are no zero-sized atomic accesses");
1431 if this.get_alloc_mutability(alloc_id)? == Mutability::Not {
1432 match access_type {
1434 AtomicAccessType::Rmw | AtomicAccessType::Store => {
1435 throw_ub_format!(
1436 "atomic store and read-modify-write operations cannot be performed on read-only memory\n\
1437 see <https://doc.rust-lang.org/nightly/std/sync/atomic/index.html#atomic-accesses-to-read-only-memory> for more information"
1438 );
1439 }
1440 AtomicAccessType::Load(_)
1441 if place.layout.size > this.tcx.data_layout().pointer_size() =>
1442 {
1443 throw_ub_format!(
1444 "large atomic load operations cannot be performed on read-only memory\n\
1445 these operations often have to be implemented using read-modify-write operations, which require writeable memory\n\
1446 see <https://doc.rust-lang.org/nightly/std/sync/atomic/index.html#atomic-accesses-to-read-only-memory> for more information"
1447 );
1448 }
1449 AtomicAccessType::Load(o) if o != AtomicReadOrd::Relaxed => {
1450 throw_ub_format!(
1451 "non-relaxed atomic load operations cannot be performed on read-only memory\n\
1452 these operations sometimes have to be implemented using read-modify-write operations, which require writeable memory\n\
1453 see <https://doc.rust-lang.org/nightly/std/sync/atomic/index.html#atomic-accesses-to-read-only-memory> for more information"
1454 );
1455 }
1456 _ => {
1457 }
1459 }
1460 }
1461 interp_ok(())
1462 }
1463
1464 fn validate_atomic_load(
1467 &self,
1468 place: &MPlaceTy<'tcx>,
1469 atomic: AtomicReadOrd,
1470 sync_clock: Option<&VClock>,
1471 ) -> InterpResult<'tcx> {
1472 let this = self.eval_context_ref();
1473 this.validate_atomic_op(
1474 place,
1475 atomic,
1476 AccessType::AtomicLoad,
1477 move |memory, clocks, index, atomic| {
1478 if atomic == AtomicReadOrd::Relaxed {
1479 memory.load_relaxed(&mut *clocks, index, place.layout.size, sync_clock)
1480 } else {
1481 memory.load_acquire(&mut *clocks, index, place.layout.size, sync_clock)
1482 }
1483 },
1484 )
1485 }
1486
1487 fn validate_atomic_store(
1490 &mut self,
1491 place: &MPlaceTy<'tcx>,
1492 atomic: AtomicWriteOrd,
1493 ) -> InterpResult<'tcx> {
1494 let this = self.eval_context_mut();
1495 this.validate_atomic_op(
1496 place,
1497 atomic,
1498 AccessType::AtomicStore,
1499 move |memory, clocks, index, atomic| {
1500 if atomic == AtomicWriteOrd::Relaxed {
1501 memory.store_relaxed(clocks, index, place.layout.size)
1502 } else {
1503 memory.store_release(clocks, index, place.layout.size)
1504 }
1505 },
1506 )
1507 }
1508
1509 fn validate_atomic_rmw(
1512 &mut self,
1513 place: &MPlaceTy<'tcx>,
1514 atomic: AtomicRwOrd,
1515 ) -> InterpResult<'tcx> {
1516 use AtomicRwOrd::*;
1517 let acquire = matches!(atomic, Acquire | AcqRel | SeqCst);
1518 let release = matches!(atomic, Release | AcqRel | SeqCst);
1519 let this = self.eval_context_mut();
1520 this.validate_atomic_op(
1521 place,
1522 atomic,
1523 AccessType::AtomicRmw,
1524 move |memory, clocks, index, _| {
1525 if acquire {
1526 memory.load_acquire(clocks, index, place.layout.size, None)?;
1527 } else {
1528 memory.load_relaxed(clocks, index, place.layout.size, None)?;
1529 }
1530 if release {
1531 memory.rmw_release(clocks, index, place.layout.size)
1532 } else {
1533 memory.rmw_relaxed(clocks, index, place.layout.size)
1534 }
1535 },
1536 )
1537 }
1538
1539 fn get_latest_nonatomic_val(&self, place: &MPlaceTy<'tcx>) -> Result<Option<Scalar>, ()> {
1543 let this = self.eval_context_ref();
1544 let (alloc_id, offset, _prov) = this.ptr_get_alloc_id(place.ptr(), 0).unwrap();
1546 let alloc_meta = &this.get_alloc_extra(alloc_id).unwrap().data_race;
1547 if alloc_meta.as_weak_memory_ref().is_none() {
1548 return Err(());
1550 }
1551 let data_race = alloc_meta.as_vclocks_ref().unwrap();
1552 for (_range, clocks) in data_race.alloc_ranges.borrow_mut().iter(offset, place.layout.size)
1554 {
1555 if clocks.atomic().is_some_and(|atomic| !(atomic.write_vector <= clocks.write())) {
1559 return Err(());
1560 }
1561 }
1562 Ok(this.run_for_validation_ref(|this| this.read_scalar(place)).discard_err())
1566 }
1567
1568 fn validate_atomic_op<A: Debug + Copy>(
1570 &self,
1571 place: &MPlaceTy<'tcx>,
1572 atomic: A,
1573 access: AccessType,
1574 mut op: impl FnMut(
1575 &mut MemoryCellClocks,
1576 &mut ThreadClockSet,
1577 VectorIdx,
1578 A,
1579 ) -> Result<(), DataRace>,
1580 ) -> InterpResult<'tcx> {
1581 let this = self.eval_context_ref();
1582 assert!(access.is_atomic());
1583 let Some(data_race) = this.machine.data_race.as_vclocks_ref() else {
1584 return interp_ok(());
1585 };
1586 if !data_race.race_detecting() {
1587 return interp_ok(());
1588 }
1589 let size = place.layout.size;
1590 let (alloc_id, base_offset, _prov) = this.ptr_get_alloc_id(place.ptr(), 0)?;
1591 let alloc_meta = this.get_alloc_extra(alloc_id)?.data_race.as_vclocks_ref().unwrap();
1594 trace!(
1595 "Atomic op({}) with ordering {:?} on {:?} (size={})",
1596 access.description(None, None),
1597 &atomic,
1598 place.ptr(),
1599 size.bytes()
1600 );
1601
1602 let current_span = this.machine.current_user_relevant_span();
1603 data_race.maybe_perform_sync_operation(
1605 &this.machine.threads,
1606 current_span,
1607 |index, mut thread_clocks| {
1608 for (mem_clocks_range, mem_clocks) in
1609 alloc_meta.alloc_ranges.borrow_mut().iter_mut(base_offset, size)
1610 {
1611 if let Err(DataRace) = op(mem_clocks, &mut thread_clocks, index, atomic) {
1612 mem::drop(thread_clocks);
1613 return VClockAlloc::report_data_race(
1614 data_race,
1615 &this.machine.threads,
1616 mem_clocks,
1617 access,
1618 place.layout.size,
1619 interpret::Pointer::new(
1620 alloc_id,
1621 Size::from_bytes(mem_clocks_range.start),
1622 ),
1623 None,
1624 )
1625 .map(|_| true);
1626 }
1627 }
1628
1629 interp_ok(true)
1631 },
1632 )?;
1633
1634 if tracing::enabled!(tracing::Level::TRACE) {
1636 for (_offset, mem_clocks) in alloc_meta.alloc_ranges.borrow().iter(base_offset, size) {
1637 trace!(
1638 "Updated atomic memory({:?}, size={}) to {:#?}",
1639 place.ptr(),
1640 size.bytes(),
1641 mem_clocks.atomic_ops
1642 );
1643 }
1644 }
1645
1646 interp_ok(())
1647 }
1648}
1649
1650impl GlobalState {
1651 pub fn new(config: &MiriConfig) -> Self {
1654 let mut global_state = GlobalState {
1655 multi_threaded: Cell::new(false),
1656 ongoing_action_data_race_free: Cell::new(false),
1657 vector_clocks: RefCell::new(IndexVec::new()),
1658 vector_info: RefCell::new(IndexVec::new()),
1659 thread_info: RefCell::new(IndexVec::new()),
1660 reuse_candidates: RefCell::new(FxHashSet::default()),
1661 last_sc_fence: RefCell::new(VClock::default()),
1662 last_sc_write_per_thread: RefCell::new(VClock::default()),
1663 track_outdated_loads: config.track_outdated_loads,
1664 weak_memory: config.weak_memory_emulation,
1665 };
1666
1667 let index = global_state.vector_clocks.get_mut().push(ThreadClockSet::default());
1670 global_state.vector_info.get_mut().push(ThreadId::MAIN_THREAD);
1671 global_state
1672 .thread_info
1673 .get_mut()
1674 .push(ThreadExtraState { vector_index: Some(index), termination_vector_clock: None });
1675
1676 global_state
1677 }
1678
1679 fn race_detecting(&self) -> bool {
1683 self.multi_threaded.get() && !self.ongoing_action_data_race_free.get()
1684 }
1685
1686 pub fn ongoing_action_data_race_free(&self) -> bool {
1687 self.ongoing_action_data_race_free.get()
1688 }
1689
1690 fn find_vector_index_reuse_candidate(&self) -> Option<VectorIdx> {
        let mut reuse = self.reuse_candidates.borrow_mut();
        let vector_clocks = self.vector_clocks.borrow();
        let mut found = None;
        for &candidate in reuse.iter() {
            let target_timestamp = vector_clocks[candidate].clock[candidate];
            if vector_clocks.iter_enumerated().all(|(clock_idx, clock)| {
                // This vector has already observed the candidate thread's final
                // timestamp, so it can no longer race with the candidate index.
                let no_data_race = clock.clock[candidate] >= target_timestamp;

                // Or this vector also belongs to a terminated thread whose index is
                // itself awaiting reuse.
                let vector_terminated = reuse.contains(&clock_idx);

                no_data_race || vector_terminated
            }) {
                found = Some(candidate);
                break;
            }
        }
        // Only remove the candidate once iteration over `reuse` has finished, so the
        // set is not mutated while it is still being iterated.
        let candidate = found?;
        assert!(reuse.remove(&candidate));
        Some(candidate)
    }
1720
1721 #[inline]
1724 pub fn thread_created(
1725 &mut self,
1726 thread_mgr: &ThreadManager<'_>,
1727 thread: ThreadId,
1728 current_span: Span,
1729 ) {
1730 let current_index = self.active_thread_index(thread_mgr);
1731
1732 self.multi_threaded.set(true);
1735
1736 let mut thread_info = self.thread_info.borrow_mut();
1738 thread_info.ensure_contains_elem(thread, Default::default);
1739
1740 let created_index = if let Some(reuse_index) = self.find_vector_index_reuse_candidate() {
1743 let vector_clocks = self.vector_clocks.get_mut();
1746 vector_clocks[reuse_index].increment_clock(reuse_index, current_span);
1747
1748 let vector_info = self.vector_info.get_mut();
1751 let old_thread = vector_info[reuse_index];
1752 vector_info[reuse_index] = thread;
1753
1754 thread_info[old_thread].vector_index = None;
1757
1758 reuse_index
1759 } else {
1760 let vector_info = self.vector_info.get_mut();
1763 vector_info.push(thread)
1764 };
1765
1766 trace!("Creating thread = {:?} with vector index = {:?}", thread, created_index);
1767
1768 thread_info[thread].vector_index = Some(created_index);
1770
1771 let vector_clocks = self.vector_clocks.get_mut();
1773 if created_index == vector_clocks.next_index() {
1774 vector_clocks.push(ThreadClockSet::default());
1775 }
1776
1777 let (current, created) = vector_clocks.pick2_mut(current_index, created_index);
1779
1780 created.join_with(current);
1783
1784 current.increment_clock(current_index, current_span);
1787 created.increment_clock(created_index, current_span);
1788 }
1789
1790 #[inline]
1794 pub fn thread_joined(&mut self, threads: &ThreadManager<'_>, joinee: ThreadId) {
1795 let thread_info = self.thread_info.borrow();
1796 let thread_info = &thread_info[joinee];
1797
1798 let join_clock = thread_info
1800 .termination_vector_clock
1801 .as_ref()
1802 .expect("joined with thread but thread has not terminated");
1803 self.acquire_clock(join_clock, threads);
1805
1806 if let Some(current_index) = thread_info.vector_index {
1811 if threads.get_live_thread_count() == 1 {
1812 let vector_clocks = self.vector_clocks.get_mut();
1813 let current_clock = &vector_clocks[current_index];
1815 if vector_clocks
1816 .iter_enumerated()
1817 .all(|(idx, clocks)| clocks.clock[idx] <= current_clock.clock[idx])
1818 {
1819 self.multi_threaded.set(false);
1823 }
1824 }
1825 }
1826 }
1827
1828 #[inline]
1836 pub fn thread_terminated(&mut self, thread_mgr: &ThreadManager<'_>) {
1837 let current_thread = thread_mgr.active_thread();
1838 let current_index = self.active_thread_index(thread_mgr);
1839
        let termination_clock = self.release_clock(thread_mgr, |clock| clock.clone());
        self.thread_info.get_mut()[current_thread].termination_vector_clock =
            Some(termination_clock);
1844
1845 let reuse = self.reuse_candidates.get_mut();
1847 reuse.insert(current_index);
1848 }
1849
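    /// Apply the effects of an atomic fence to the active thread's clocks: a fence
    /// with acquire semantics joins the buffered `fence_acquire` clock into the
    /// thread's clock, a fence with release semantics snapshots the thread's clock
    /// into `fence_release`, and a SeqCst fence additionally synchronizes with the
    /// global SC fence clock.
    ///
    /// Illustrative sketch only of the fence-based message passing this models:
    ///
    /// ```ignore
    /// // thread 1:                  // thread 2:
    /// DATA.store(1, Relaxed);       if FLAG.load(Relaxed) == 1 {
    /// fence(Release);                   fence(Acquire);
    /// FLAG.store(1, Relaxed);           assert_eq!(DATA.load(Relaxed), 1); // no race
    ///                               }
    /// ```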
1850 fn atomic_fence<'tcx>(
1852 &self,
1853 machine: &MiriMachine<'tcx>,
1854 atomic: AtomicFenceOrd,
1855 ) -> InterpResult<'tcx> {
1856 let current_span = machine.current_user_relevant_span();
1857 self.maybe_perform_sync_operation(&machine.threads, current_span, |index, mut clocks| {
1858 trace!("Atomic fence on {:?} with ordering {:?}", index, atomic);
1859
1860 if atomic != AtomicFenceOrd::Release {
1864 clocks.apply_acquire_fence();
1866 }
1867 if atomic == AtomicFenceOrd::SeqCst {
1868 let mut sc_fence_clock = self.last_sc_fence.borrow_mut();
1876 sc_fence_clock.join(&clocks.clock);
1877 clocks.clock.join(&sc_fence_clock);
1878 clocks.write_seqcst.join(&self.last_sc_write_per_thread.borrow());
1881 }
1882 if atomic != AtomicFenceOrd::Acquire {
1885 clocks.apply_release_fence();
1887 }
1888
1889 interp_ok(atomic != AtomicFenceOrd::Acquire)
1891 })
1892 }
1893
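    /// Run a potentially synchronizing operation on the active thread's clocks, but
    /// only while the detector is in multi-threaded mode (otherwise no race is
    /// possible and the clocks are not maintained). If the callback reports that it
    /// released state (by returning `true`), the active thread's clock is incremented
    /// afterwards so that later events are distinguishable from the released state.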
1894 fn maybe_perform_sync_operation<'tcx>(
1902 &self,
1903 thread_mgr: &ThreadManager<'_>,
1904 current_span: Span,
1905 op: impl FnOnce(VectorIdx, RefMut<'_, ThreadClockSet>) -> InterpResult<'tcx, bool>,
1906 ) -> InterpResult<'tcx> {
1907 if self.multi_threaded.get() {
1908 let (index, clocks) = self.active_thread_state_mut(thread_mgr);
1909 if op(index, clocks)? {
1910 let (_, mut clocks) = self.active_thread_state_mut(thread_mgr);
1911 clocks.increment_clock(index, current_span);
1912 }
1913 }
1914 interp_ok(())
1915 }
1916
1917 fn print_thread_metadata(&self, thread_mgr: &ThreadManager<'_>, vector: VectorIdx) -> String {
1920 let thread = self.vector_info.borrow()[vector];
1921 let thread_name = thread_mgr.get_thread_display_name(thread);
1922 format!("thread `{thread_name}`")
1923 }
1924
1925 pub fn acquire_clock<'tcx>(&self, clock: &VClock, threads: &ThreadManager<'tcx>) {
1930 let thread = threads.active_thread();
1931 let (_, mut clocks) = self.thread_state_mut(thread);
1932 clocks.clock.join(clock);
1933 }
1934
1935 pub fn release_clock<'tcx, R>(
1939 &self,
1940 threads: &ThreadManager<'tcx>,
1941 callback: impl FnOnce(&VClock) -> R,
1942 ) -> R {
1943 let thread = threads.active_thread();
1944 let span = threads.active_thread_ref().current_user_relevant_span();
1945 let (index, mut clocks) = self.thread_state_mut(thread);
1946 let r = callback(&clocks.clock);
1947 clocks.increment_clock(index, span);
1950
1951 r
1952 }
1953
1954 fn thread_index(&self, thread: ThreadId) -> VectorIdx {
1955 self.thread_info.borrow()[thread].vector_index.expect("thread has no assigned vector")
1956 }
1957
1958 #[inline]
1961 fn thread_state_mut(&self, thread: ThreadId) -> (VectorIdx, RefMut<'_, ThreadClockSet>) {
1962 let index = self.thread_index(thread);
1963 let ref_vector = self.vector_clocks.borrow_mut();
1964 let clocks = RefMut::map(ref_vector, |vec| &mut vec[index]);
1965 (index, clocks)
1966 }
1967
1968 #[inline]
1971 fn thread_state(&self, thread: ThreadId) -> (VectorIdx, Ref<'_, ThreadClockSet>) {
1972 let index = self.thread_index(thread);
1973 let ref_vector = self.vector_clocks.borrow();
1974 let clocks = Ref::map(ref_vector, |vec| &vec[index]);
1975 (index, clocks)
1976 }
1977
1978 #[inline]
1981 pub(super) fn active_thread_state(
1982 &self,
1983 thread_mgr: &ThreadManager<'_>,
1984 ) -> (VectorIdx, Ref<'_, ThreadClockSet>) {
1985 self.thread_state(thread_mgr.active_thread())
1986 }
1987
1988 #[inline]
1991 pub(super) fn active_thread_state_mut(
1992 &self,
1993 thread_mgr: &ThreadManager<'_>,
1994 ) -> (VectorIdx, RefMut<'_, ThreadClockSet>) {
1995 self.thread_state_mut(thread_mgr.active_thread())
1996 }
1997
1998 #[inline]
2001 fn active_thread_index(&self, thread_mgr: &ThreadManager<'_>) -> VectorIdx {
2002 let active_thread_id = thread_mgr.active_thread();
2003 self.thread_index(active_thread_id)
2004 }
2005
2006 pub(super) fn sc_write(&self, thread_mgr: &ThreadManager<'_>) {
2008 let (index, clocks) = self.active_thread_state(thread_mgr);
2009 self.last_sc_write_per_thread.borrow_mut().set_at_index(&clocks.clock, index);
2010 }
2011
2012 pub(super) fn sc_read(&self, thread_mgr: &ThreadManager<'_>) {
2014 let (.., mut clocks) = self.active_thread_state_mut(thread_mgr);
2015 clocks.read_seqcst.join(&self.last_sc_fence.borrow());
2016 }
2017}