use std::cell::{Cell, Ref, RefCell, RefMut};
use std::fmt::Debug;
use std::mem;

use rustc_abi::{Align, HasDataLayout, Size};
use rustc_ast::Mutability;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_index::{Idx, IndexVec};
use rustc_middle::mir;
use rustc_middle::ty::Ty;
use rustc_span::Span;

use super::vector_clock::{VClock, VTimestamp, VectorIdx};
use super::weak_memory::EvalContextExt as _;
use crate::concurrency::GlobalDataRaceHandler;
use crate::diagnostics::RacingOp;
use crate::*;

pub type AllocState = VClockAlloc;

/// Valid atomic read-modify-write orderings, a subset of `atomic::Ordering`.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum AtomicRwOrd {
    Relaxed,
    Acquire,
    Release,
    AcqRel,
    SeqCst,
}

/// Valid atomic read orderings, a subset of `atomic::Ordering`.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum AtomicReadOrd {
    Relaxed,
    Acquire,
    SeqCst,
}

/// Valid atomic write orderings, a subset of `atomic::Ordering`.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum AtomicWriteOrd {
    Relaxed,
    Release,
    SeqCst,
}

/// Valid atomic fence orderings, a subset of `atomic::Ordering`.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum AtomicFenceOrd {
    Acquire,
    Release,
    AcqRel,
    SeqCst,
}
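
// A rough summary of how these orderings are lowered onto the vector-clock operations
// implemented further down in `MemoryCellClocks` (per memory cell):
//
//   Acquire load:   the thread clock joins the cell's `sync_vector`
//   Relaxed load:   the cell's `sync_vector` is stashed in `fence_acquire` (made visible
//                   only by a later acquire fence)
//   Release store:  the cell's `sync_vector` is overwritten with the thread clock
//   Relaxed store:  the cell's `sync_vector` is overwritten with the thread's `fence_release`
//   Release RMW:    the thread clock is *joined* into `sync_vector` (earlier releases kept)
//   Relaxed RMW:    the thread's `fence_release` is joined into `sync_vector`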

/// Per-thread state for the data-race detector: the thread's vector clock plus the extra
/// clocks needed for fences and SeqCst accesses.
#[derive(Clone, Default, Debug)]
pub(super) struct ThreadClockSet {
    /// The increasing clock representing timestamps of events that happen-before this thread.
    pub(super) clock: VClock,

    /// Timestamps that will be joined into `clock` once this thread executes an acquire fence.
    fence_acquire: VClock,

    /// Snapshot of `clock` taken by the last release fence executed by this thread.
    fence_release: VClock,

    /// Timestamps of the last SC write of each thread, as seen by this thread
    /// (updated when this thread performs an SC fence).
    pub(super) write_seqcst: VClock,

    /// Timestamps of the last SC fence of each thread, as seen by this thread
    /// (updated when this thread performs an SC read).
    pub(super) read_seqcst: VClock,
}

impl ThreadClockSet {
    /// Apply the effects of a release fence to this set of thread vector clocks.
    #[inline]
    fn apply_release_fence(&mut self) {
        self.fence_release.clone_from(&self.clock);
    }

    /// Apply the effects of an acquire fence to this set of thread vector clocks.
    #[inline]
    fn apply_acquire_fence(&mut self) {
        self.clock.join(&self.fence_acquire);
    }

    /// Increment the happens-before clock at a known index.
    #[inline]
    fn increment_clock(&mut self, index: VectorIdx, current_span: Span) {
        self.clock.increment_index(index, current_span);
    }

    /// Join the happens-before clock with that of another thread,
    /// used to model thread join operations.
    fn join_with(&mut self, other: &ThreadClockSet) {
        self.clock.join(&other.clock);
    }
}
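
// Sketch of how the fence clocks above compose (a hypothetical program, shown only to
// illustrate the data flow through `fence_release`/`fence_acquire`):
//
//   thread A: DATA = 1; fence(Release); FLAG.store(1, Relaxed);
//   thread B: if FLAG.load(Relaxed) == 1 { fence(Acquire); read DATA; }
//
// The release fence snapshots A's clock into `fence_release`; the relaxed store publishes
// that snapshot via the cell's `sync_vector`; B's relaxed load stashes it in `fence_acquire`;
// and B's acquire fence joins it into B's clock, establishing the happens-before edge that
// makes the non-atomic accesses to DATA race-free.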

/// Marker error type returned when a data race is detected.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub struct DataRace;

/// Per-cell clocks that explicitly track atomic accesses; allocated lazily since
/// most memory is never accessed atomically.
#[derive(Clone, PartialEq, Eq, Debug)]
struct AtomicMemoryCellClocks {
    /// Timestamps of the last atomic read of this cell by each thread.
    read_vector: VClock,

    /// Timestamps of the last atomic write of this cell by each thread.
    write_vector: VClock,

    /// Synchronization vector: the clock that release stores and read-modify-writes
    /// make available to acquiring loads of this cell.
    sync_vector: VClock,

    /// The size of atomic accesses to this cell. `None` means differently-sized atomic
    /// reads have been permitted (see `MemoryCellClocks::atomic_access`).
    size: Option<Size>,
}

/// The type of an atomic access, used when checking that the access is allowed.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
enum AtomicAccessType {
    Load(AtomicReadOrd),
    Store,
    Rmw,
}

/// The type of a non-atomic read operation.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum NaReadType {
    /// Standard non-atomic read.
    Read,

    /// An implicit read generated by a retag.
    Retag,
}

impl NaReadType {
    fn description(self) -> &'static str {
        match self {
            NaReadType::Read => "non-atomic read",
            NaReadType::Retag => "retag read",
        }
    }
}

/// The type of a non-atomic write operation: allocation and deallocation count as writes
/// for the purposes of race detection.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum NaWriteType {
    /// Allocate memory.
    Allocate,

    /// Standard non-atomic write.
    Write,

    /// An implicit write generated by a retag.
    Retag,

    /// Deallocate memory.
    Deallocate,
}

impl NaWriteType {
    fn description(self) -> &'static str {
        match self {
            NaWriteType::Allocate => "creating a new allocation",
            NaWriteType::Write => "non-atomic write",
            NaWriteType::Retag => "retag write",
            NaWriteType::Deallocate => "deallocation",
        }
    }
}

/// The general kind of a memory access, atomic or non-atomic, used for diagnostics.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
enum AccessType {
    NaRead(NaReadType),
    NaWrite(NaWriteType),
    AtomicLoad,
    AtomicStore,
    AtomicRmw,
}

/// The clock state the detector keeps per range of bytes of memory.
#[derive(Clone, PartialEq, Eq, Debug)]
struct MemoryCellClocks {
    /// The vector index and timestamp of the last non-atomic write (or allocation)
    /// of this memory cell.
    write: (VectorIdx, VTimestamp),

    /// The kind of operation that `write` represents: newly allocated memory,
    /// a non-atomic write, a retag, or a deallocation.
    write_type: NaWriteType,

    /// Timestamps of the last non-atomic read of this cell by each thread.
    read: VClock,

    /// Atomic access tracking, allocated lazily on the first atomic access.
    atomic_ops: Option<Box<AtomicMemoryCellClocks>>,
}

/// Extra metadata associated with a thread.
#[derive(Debug, Clone, Default)]
struct ThreadExtraState {
    /// The vector index assigned to this thread, if any. Indices can be reused
    /// after a thread terminates.
    vector_index: Option<VectorIdx>,

    /// The thread's vector clock at termination, acquired by threads that join it.
    termination_vector_clock: Option<VClock>,
}

/// Global data-race detection state, shared by all allocations.
#[derive(Debug, Clone)]
pub struct GlobalState {
    /// Set once more than one thread has been spawned; while false,
    /// race detection is skipped entirely.
    multi_threaded: Cell<bool>,

    /// Set while an action known to be data-race free is ongoing
    /// (see `allow_data_races_ref`/`allow_data_races_mut`).
    ongoing_action_data_race_free: Cell<bool>,

    /// The clock set for each vector index.
    vector_clocks: RefCell<IndexVec<VectorIdx, ThreadClockSet>>,

    /// The thread currently associated with each vector index.
    vector_info: RefCell<IndexVec<VectorIdx, ThreadId>>,

    /// Per-thread extra state (assigned vector index, termination clock).
    thread_info: RefCell<IndexVec<ThreadId, ThreadExtraState>>,

    /// Vector indices of terminated threads that may be reused for new threads.
    reuse_candidates: RefCell<FxHashSet<VectorIdx>>,

    /// The timestamp of the last SC fence performed by each thread.
    last_sc_fence: RefCell<VClock>,

    /// The timestamp of the last SC write performed by each thread.
    last_sc_write_per_thread: RefCell<VClock>,

    /// Whether to report loads that observe outdated values under weak-memory emulation.
    pub track_outdated_loads: bool,

    /// Whether weak-memory emulation is enabled.
    pub weak_memory: bool,
}

impl VisitProvenance for GlobalState {
    fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {
        // Vector clocks contain no pointers, so there is no provenance to visit.
    }
}

impl AccessType {
    fn description(self, ty: Option<Ty<'_>>, size: Option<Size>) -> String {
        let mut msg = String::new();

        if let Some(size) = size {
            if size == Size::ZERO {
                // There were multiple differently-sized atomic reads, so we do not know which
                // size the racing read used (see `report_data_race`).
                assert!(self == AccessType::AtomicLoad);
                assert!(ty.is_none());
                return format!("multiple differently-sized atomic loads, including one load");
            }
            msg.push_str(&format!("{}-byte ", size.bytes()))
        }

        msg.push_str(match self {
            AccessType::NaRead(w) => w.description(),
            AccessType::NaWrite(w) => w.description(),
            AccessType::AtomicLoad => "atomic load",
            AccessType::AtomicStore => "atomic store",
            AccessType::AtomicRmw => "atomic read-modify-write",
        });

        if let Some(ty) = ty {
            msg.push_str(&format!(" of type `{ty}`"));
        }

        msg
    }

    fn is_atomic(self) -> bool {
        match self {
            AccessType::AtomicLoad | AccessType::AtomicStore | AccessType::AtomicRmw => true,
            AccessType::NaRead(_) | AccessType::NaWrite(_) => false,
        }
    }

    fn is_read(self) -> bool {
        match self {
            AccessType::AtomicLoad | AccessType::NaRead(_) => true,
            AccessType::NaWrite(_) | AccessType::AtomicStore | AccessType::AtomicRmw => false,
        }
    }

    fn is_retag(self) -> bool {
        matches!(
            self,
            AccessType::NaRead(NaReadType::Retag) | AccessType::NaWrite(NaWriteType::Retag)
        )
    }
}

impl AtomicMemoryCellClocks {
    fn new(size: Size) -> Self {
        AtomicMemoryCellClocks {
            read_vector: Default::default(),
            write_vector: Default::default(),
            sync_vector: Default::default(),
            size: Some(size),
        }
    }
}

impl MemoryCellClocks {
    /// Create a new set of clocks representing memory allocated
    /// at the given vector timestamp and index.
    fn new(alloc: VTimestamp, alloc_index: VectorIdx) -> Self {
        MemoryCellClocks {
            read: VClock::default(),
            write: (alloc_index, alloc),
            write_type: NaWriteType::Allocate,
            atomic_ops: None,
        }
    }

    /// Returns whether the last write to this cell happened-before the given clock.
    #[inline]
    fn write_was_before(&self, other: &VClock) -> bool {
        // Equivalent to `self.write() <= *other`, without materializing a clock for the write.
        self.write.1 <= other[self.write.0]
    }

    /// The last write, as a single-entry vector clock.
    #[inline]
    fn write(&self) -> VClock {
        VClock::new_with_index(self.write.0, self.write.1)
    }

    /// Load the internal atomic memory cell clocks, if they exist.
    #[inline]
    fn atomic(&self) -> Option<&AtomicMemoryCellClocks> {
        self.atomic_ops.as_deref()
    }

    /// Load the internal atomic memory cell clocks, assuming they exist.
    #[inline]
    fn atomic_mut_unwrap(&mut self) -> &mut AtomicMemoryCellClocks {
        self.atomic_ops.as_deref_mut().unwrap()
    }

    /// Fetch (or create) the atomic clocks for an access of the given size,
    /// enforcing the rules for mixed-size atomic accesses.
    fn atomic_access(
        &mut self,
        thread_clocks: &ThreadClockSet,
        size: Size,
        write: bool,
    ) -> Result<&mut AtomicMemoryCellClocks, DataRace> {
        match self.atomic_ops {
            Some(ref mut atomic) => {
                if atomic.size == Some(size) {
                    // Same size as all previous accesses: always fine.
                    Ok(atomic)
                } else if atomic.read_vector <= thread_clocks.clock
                    && atomic.write_vector <= thread_clocks.clock
                {
                    // We are ordered after all previous atomic accesses, so the size may change.
                    atomic.size = Some(size);
                    Ok(atomic)
                } else if !write && atomic.write_vector <= thread_clocks.clock {
                    // A read that is ordered after all previous writes: mixed sizes are tolerated,
                    // recorded by clearing the size.
                    atomic.size = None;
                    Ok(atomic)
                } else {
                    Err(DataRace)
                }
            }
            None => {
                self.atomic_ops = Some(Box::new(AtomicMemoryCellClocks::new(size)));
                Ok(self.atomic_ops.as_mut().unwrap())
            }
        }
    }
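
    // Illustrative consequence of the rules above (hypothetical program): two threads that
    // concurrently perform a 4-byte and a 2-byte atomic load on overlapping memory leave
    // `size = None` and stay race-free, but a later differently-sized atomic *store* that is
    // not ordered after both loads makes `atomic_access` return `Err(DataRace)`, which is then
    // reported as a mixed-size race.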

    /// Detect races with non-atomic writes for an atomic load; on success, acquire the
    /// cell's `sync_vector` into the thread's clock (load-acquire semantics).
    fn load_acquire(
        &mut self,
        thread_clocks: &mut ThreadClockSet,
        index: VectorIdx,
        access_size: Size,
    ) -> Result<(), DataRace> {
        self.atomic_read_detect(thread_clocks, index, access_size)?;
        if let Some(atomic) = self.atomic() {
            thread_clocks.clock.join(&atomic.sync_vector);
        }
        Ok(())
    }

    /// Like `load_acquire`, except the acquired clock only becomes visible to the thread
    /// once it executes an acquire fence (load-relaxed semantics).
    fn load_relaxed(
        &mut self,
        thread_clocks: &mut ThreadClockSet,
        index: VectorIdx,
        access_size: Size,
    ) -> Result<(), DataRace> {
        self.atomic_read_detect(thread_clocks, index, access_size)?;
        if let Some(atomic) = self.atomic() {
            thread_clocks.fence_acquire.join(&atomic.sync_vector);
        }
        Ok(())
    }

    /// Detect races for an atomic store and release the thread's clock to
    /// subsequent acquiring loads of this cell (store-release semantics).
    fn store_release(
        &mut self,
        thread_clocks: &ThreadClockSet,
        index: VectorIdx,
        access_size: Size,
    ) -> Result<(), DataRace> {
        self.atomic_write_detect(thread_clocks, index, access_size)?;
        let atomic = self.atomic_mut_unwrap();
        atomic.sync_vector.clone_from(&thread_clocks.clock);
        Ok(())
    }

    /// Like `store_release`, but only releases the clock captured by the last release fence
    /// (store-relaxed semantics).
    fn store_relaxed(
        &mut self,
        thread_clocks: &ThreadClockSet,
        index: VectorIdx,
        access_size: Size,
    ) -> Result<(), DataRace> {
        self.atomic_write_detect(thread_clocks, index, access_size)?;

        // The `sync_vector` is overwritten, not joined: following the C++20 change to release
        // sequences, a relaxed (non-RMW) store blocks the release sequence even if it comes
        // from the same thread as the release store.
        let atomic = self.atomic_mut_unwrap();
        atomic.sync_vector.clone_from(&thread_clocks.fence_release);
        Ok(())
    }

    /// Detect races for a releasing read-modify-write: the thread's clock is *joined* into
    /// the `sync_vector`, so earlier release stores remain visible (release sequences).
    fn rmw_release(
        &mut self,
        thread_clocks: &ThreadClockSet,
        index: VectorIdx,
        access_size: Size,
    ) -> Result<(), DataRace> {
        self.atomic_write_detect(thread_clocks, index, access_size)?;
        let atomic = self.atomic_mut_unwrap();
        atomic.sync_vector.join(&thread_clocks.clock);
        Ok(())
    }

    /// Like `rmw_release`, but only joins the clock captured by the last release fence.
    fn rmw_relaxed(
        &mut self,
        thread_clocks: &ThreadClockSet,
        index: VectorIdx,
        access_size: Size,
    ) -> Result<(), DataRace> {
        self.atomic_write_detect(thread_clocks, index, access_size)?;
        let atomic = self.atomic_mut_unwrap();
        atomic.sync_vector.join(&thread_clocks.fence_release);
        Ok(())
    }
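
    // Illustration of join vs. overwrite of `sync_vector` (hypothetical program):
    //
    //   thread A: X.store(1, Release);
    //   thread B: X.fetch_add(1, Relaxed);
    //   thread C: if X.load(Acquire) == 2 { /* synchronized with A */ }
    //
    // B's relaxed RMW *joins* into `sync_vector`, so A's release is still acquired by C's
    // load (a release sequence headed by A). A plain relaxed store in B's place would
    // overwrite `sync_vector` with B's `fence_release` and break that edge.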

    /// Detect races with non-atomic writes for an atomic read operation,
    /// and record the read in the atomic read vector.
    fn atomic_read_detect(
        &mut self,
        thread_clocks: &ThreadClockSet,
        index: VectorIdx,
        access_size: Size,
    ) -> Result<(), DataRace> {
        trace!("Atomic read with vectors: {:#?} :: {:#?}", self, thread_clocks);
        let atomic = self.atomic_access(thread_clocks, access_size, false)?;
        atomic.read_vector.set_at_index(&thread_clocks.clock, index);
        // The last non-atomic write must happen-before this atomic read.
        if self.write_was_before(&thread_clocks.clock) { Ok(()) } else { Err(DataRace) }
    }

    /// Detect races with non-atomic reads and writes for an atomic write operation,
    /// and record the write in the atomic write vector.
    fn atomic_write_detect(
        &mut self,
        thread_clocks: &ThreadClockSet,
        index: VectorIdx,
        access_size: Size,
    ) -> Result<(), DataRace> {
        trace!("Atomic write with vectors: {:#?} :: {:#?}", self, thread_clocks);
        let atomic = self.atomic_access(thread_clocks, access_size, true)?;
        atomic.write_vector.set_at_index(&thread_clocks.clock, index);
        // The last non-atomic write and all non-atomic reads must happen-before this write.
        if self.write_was_before(&thread_clocks.clock) && self.read <= thread_clocks.clock {
            Ok(())
        } else {
            Err(DataRace)
        }
    }

    /// Detect data races for a non-atomic read: it races with any write (atomic or
    /// non-atomic) that does not happen-before it. Reads never race with other reads.
    fn read_race_detect(
        &mut self,
        thread_clocks: &mut ThreadClockSet,
        index: VectorIdx,
        read_type: NaReadType,
        current_span: Span,
    ) -> Result<(), DataRace> {
        trace!("Unsynchronized read with vectors: {:#?} :: {:#?}", self, thread_clocks);
        if !current_span.is_dummy() {
            thread_clocks.clock.index_mut(index).span = current_span;
        }
        thread_clocks.clock.index_mut(index).set_read_type(read_type);
        if self.write_was_before(&thread_clocks.clock) {
            // All atomic writes must also be ordered before this read.
            let race_free = if let Some(atomic) = self.atomic() {
                atomic.write_vector <= thread_clocks.clock
            } else {
                true
            };
            self.read.set_at_index(&thread_clocks.clock, index);
            if race_free { Ok(()) } else { Err(DataRace) }
        } else {
            Err(DataRace)
        }
    }

    /// Detect data races for a non-atomic write: it races with any access
    /// (read or write, atomic or non-atomic) that does not happen-before it.
    fn write_race_detect(
        &mut self,
        thread_clocks: &mut ThreadClockSet,
        index: VectorIdx,
        write_type: NaWriteType,
        current_span: Span,
    ) -> Result<(), DataRace> {
        trace!("Unsynchronized write with vectors: {:#?} :: {:#?}", self, thread_clocks);
        if !current_span.is_dummy() {
            thread_clocks.clock.index_mut(index).span = current_span;
        }
        if self.write_was_before(&thread_clocks.clock) && self.read <= thread_clocks.clock {
            let race_free = if let Some(atomic) = self.atomic() {
                atomic.write_vector <= thread_clocks.clock
                    && atomic.read_vector <= thread_clocks.clock
            } else {
                true
            };
            self.write = (index, thread_clocks.clock[index]);
            self.write_type = write_type;
            if race_free {
                self.read.set_zero_vector();
                Ok(())
            } else {
                Err(DataRace)
            }
        } else {
            Err(DataRace)
        }
    }
}
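
// In summary: a race is reported when two accesses to the same byte are not ordered by
// happens-before and at least one of them is a write, unless both are atomic. In addition,
// overlapping atomic accesses of different sizes are reported unless they are reads ordered
// after all previous atomic writes to that byte.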

impl GlobalDataRaceHandler {
    /// Toggle whether the current action is known to be free of data races.
    /// This is an implementation detail of the `allow_data_races_*` helpers.
    fn set_ongoing_action_data_race_free(&self, enable: bool) {
        match self {
            GlobalDataRaceHandler::None => {}
            GlobalDataRaceHandler::Vclocks(data_race) => {
                let old = data_race.ongoing_action_data_race_free.replace(enable);
                assert_ne!(old, enable, "cannot nest allow_data_races");
            }
            GlobalDataRaceHandler::Genmc(genmc_ctx) => {
                genmc_ctx.set_ongoing_action_data_race_free(enable);
            }
        }
    }
}

impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx> {}
pub trait EvalContextExt<'tcx>: MiriInterpCxExt<'tcx> {
    /// Perform an atomic read operation at the memory location.
    fn read_scalar_atomic(
        &self,
        place: &MPlaceTy<'tcx>,
        atomic: AtomicReadOrd,
    ) -> InterpResult<'tcx, Scalar> {
        let this = self.eval_context_ref();
        this.atomic_access_check(place, AtomicAccessType::Load(atomic))?;
        // In GenMC mode, the GenMC context handles the atomic semantics of the load.
        if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
            let old_val = None;
            return genmc_ctx.atomic_load(
                this,
                place.ptr().addr(),
                place.layout.size,
                atomic,
                old_val,
            );
        }

        let scalar = this.allow_data_races_ref(move |this| this.read_scalar(place))?;
        let buffered_scalar = this.buffered_atomic_read(place, atomic, scalar, || {
            this.validate_atomic_load(place, atomic)
        })?;
        interp_ok(buffered_scalar.ok_or_else(|| err_ub!(InvalidUninitBytes(None)))?)
    }

    /// Perform an atomic write operation at the memory location.
    fn write_scalar_atomic(
        &mut self,
        val: Scalar,
        dest: &MPlaceTy<'tcx>,
        atomic: AtomicWriteOrd,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        this.atomic_access_check(dest, AtomicAccessType::Store)?;

        // Read the previous value so it can be put into the store buffer.
        let old_val = this.run_for_validation_mut(|this| this.read_scalar(dest)).discard_err();
        if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
            genmc_ctx.atomic_store(this, dest.ptr().addr(), dest.layout.size, val, atomic)?;
            return interp_ok(());
        }
        this.allow_data_races_mut(move |this| this.write_scalar(val, dest))?;
        this.validate_atomic_store(dest, atomic)?;
        this.buffered_atomic_write(val, dest, atomic, old_val)
    }

    /// Perform an atomic read-modify-write operation on a memory location; returns the old value.
    fn atomic_rmw_op_immediate(
        &mut self,
        place: &MPlaceTy<'tcx>,
        rhs: &ImmTy<'tcx>,
        op: mir::BinOp,
        not: bool,
        atomic: AtomicRwOrd,
    ) -> InterpResult<'tcx, ImmTy<'tcx>> {
        let this = self.eval_context_mut();
        this.atomic_access_check(place, AtomicAccessType::Rmw)?;

        let old = this.allow_data_races_mut(|this| this.read_immediate(place))?;

        if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
            let (old_val, new_val) = genmc_ctx.atomic_rmw_op(
                this,
                place.ptr().addr(),
                place.layout.size,
                atomic,
                (op, not),
                rhs.to_scalar(),
            )?;
            this.allow_data_races_mut(|this| this.write_scalar(new_val, place))?;
            return interp_ok(ImmTy::from_scalar(old_val, old.layout));
        }

        let val = this.binary_op(op, &old, rhs)?;
        let val = if not { this.unary_op(mir::UnOp::Not, &val)? } else { val };
        this.allow_data_races_mut(|this| this.write_immediate(*val, place))?;

        this.validate_atomic_rmw(place, atomic)?;

        this.buffered_atomic_rmw(val.to_scalar(), place, atomic, old.to_scalar())?;
        interp_ok(old)
    }

    /// Perform an atomic exchange with a memory place and a new
    /// scalar value; the old value is returned.
    fn atomic_exchange_scalar(
        &mut self,
        place: &MPlaceTy<'tcx>,
        new: Scalar,
        atomic: AtomicRwOrd,
    ) -> InterpResult<'tcx, Scalar> {
        let this = self.eval_context_mut();
        this.atomic_access_check(place, AtomicAccessType::Rmw)?;

        let old = this.allow_data_races_mut(|this| this.read_scalar(place))?;
        this.allow_data_races_mut(|this| this.write_scalar(new, place))?;

        if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
            let (old_val, _is_success) = genmc_ctx.atomic_exchange(
                this,
                place.ptr().addr(),
                place.layout.size,
                new,
                atomic,
            )?;
            return interp_ok(old_val);
        }

        this.validate_atomic_rmw(place, atomic)?;

        this.buffered_atomic_rmw(new, place, atomic, old)?;
        interp_ok(old)
    }

    /// Perform an atomic min/max operation on a memory location; the old value is returned.
    fn atomic_min_max_scalar(
        &mut self,
        place: &MPlaceTy<'tcx>,
        rhs: ImmTy<'tcx>,
        min: bool,
        atomic: AtomicRwOrd,
    ) -> InterpResult<'tcx, ImmTy<'tcx>> {
        let this = self.eval_context_mut();
        this.atomic_access_check(place, AtomicAccessType::Rmw)?;

        let old = this.allow_data_races_mut(|this| this.read_immediate(place))?;

        if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
            let (old_val, new_val) = genmc_ctx.atomic_min_max_op(
                this,
                place.ptr().addr(),
                place.layout.size,
                atomic,
                min,
                old.layout.backend_repr.is_signed(),
                rhs.to_scalar(),
            )?;
            this.allow_data_races_mut(|this| this.write_scalar(new_val, place))?;
            return interp_ok(ImmTy::from_scalar(old_val, old.layout));
        }

        let lt = this.binary_op(mir::BinOp::Lt, &old, &rhs)?.to_scalar().to_bool()?;

        #[rustfmt::skip]
        let new_val = if min {
            if lt { &old } else { &rhs }
        } else {
            if lt { &rhs } else { &old }
        };

        this.allow_data_races_mut(|this| this.write_immediate(**new_val, place))?;

        this.validate_atomic_rmw(place, atomic)?;

        this.buffered_atomic_rmw(new_val.to_scalar(), place, atomic, old.to_scalar())?;

        // Return the old value.
        interp_ok(old)
    }

    /// Perform an atomic compare-and-exchange at a given memory location.
    /// On success an atomic RMW operation is performed; on failure only an atomic read occurs.
    /// If `can_fail_spuriously` is true, this is a `compare_exchange_weak` operation and may
    /// fail some portion of the time even when the values are equal.
    fn atomic_compare_exchange_scalar(
        &mut self,
        place: &MPlaceTy<'tcx>,
        expect_old: &ImmTy<'tcx>,
        new: Scalar,
        success: AtomicRwOrd,
        fail: AtomicReadOrd,
        can_fail_spuriously: bool,
    ) -> InterpResult<'tcx, Immediate<Provenance>> {
        use rand::Rng as _;
        let this = self.eval_context_mut();
        this.atomic_access_check(place, AtomicAccessType::Rmw)?;

        // Read as an immediate for the sake of `binary_op()`.
        let old = this.allow_data_races_mut(|this| this.read_immediate(place))?;

        if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
            let (old, cmpxchg_success) = genmc_ctx.atomic_compare_exchange(
                this,
                place.ptr().addr(),
                place.layout.size,
                this.read_scalar(expect_old)?,
                new,
                success,
                fail,
                can_fail_spuriously,
            )?;
            if cmpxchg_success {
                this.allow_data_races_mut(|this| this.write_scalar(new, place))?;
            }
            return interp_ok(Immediate::ScalarPair(old, Scalar::from_bool(cmpxchg_success)));
        }

        // Compare the old value with the expected one.
        let eq = this.binary_op(mir::BinOp::Eq, &old, expect_old)?;
        // If the operation would succeed but is "weak", fail some portion
        // of the time, based on `success_rate`.
        let success_rate = 1.0 - this.machine.cmpxchg_weak_failure_rate;
        let cmpxchg_success = eq.to_scalar().to_bool()?
            && if can_fail_spuriously {
                this.machine.rng.get_mut().random_bool(success_rate)
            } else {
                true
            };
        let res = Immediate::ScalarPair(old.to_scalar(), Scalar::from_bool(cmpxchg_success));

        // Update the place depending on the comparison: if successful, perform a full
        // read-modify-write validation; otherwise treat this as an atomic load with the
        // failure ordering.
        if cmpxchg_success {
            this.allow_data_races_mut(|this| this.write_scalar(new, place))?;
            this.validate_atomic_rmw(place, success)?;
            this.buffered_atomic_rmw(new, place, success, old.to_scalar())?;
        } else {
            this.validate_atomic_load(place, fail)?;
            // A failed compare-exchange is equivalent to a load, reading from the latest store
            // in the modification order.
            this.perform_read_on_buffered_latest(place, fail)?;
        }

        // Return the old value.
        interp_ok(res)
    }

    /// Update the data-race detector for an atomic fence on the current thread.
    fn atomic_fence(&mut self, atomic: AtomicFenceOrd) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let machine = &this.machine;
        match &this.machine.data_race {
            GlobalDataRaceHandler::None => interp_ok(()),
            GlobalDataRaceHandler::Vclocks(data_race) => data_race.atomic_fence(machine, atomic),
            GlobalDataRaceHandler::Genmc(genmc_ctx) => genmc_ctx.atomic_fence(machine, atomic),
        }
    }

    /// Once all threads have terminated, permanently disable race detection for the
    /// remaining single-threaded shutdown work.
    fn allow_data_races_all_threads_done(&mut self) {
        let this = self.eval_context_ref();
        assert!(this.have_all_terminated());
        this.machine.data_race.set_ongoing_action_data_race_free(true);
    }

    /// Calls the callback with the "release" clock of the current thread.
    /// Other threads can acquire this clock in the future to establish synchronization
    /// with this program point.
    ///
    /// Returns `None` (without invoking the callback) if the vector-clock race detector
    /// is not active.
    fn release_clock<R>(&self, callback: impl FnOnce(&VClock) -> R) -> Option<R> {
        let this = self.eval_context_ref();
        Some(
            this.machine.data_race.as_vclocks_ref()?.release_clock(&this.machine.threads, callback),
        )
    }

    /// Acquire the given clock into the current thread, establishing synchronization with
    /// the point at which that clock snapshot was released.
    fn acquire_clock(&self, clock: &VClock) {
        let this = self.eval_context_ref();
        if let Some(data_race) = this.machine.data_race.as_vclocks_ref() {
            data_race.acquire_clock(clock, &this.machine.threads);
        }
    }
}
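
// As an illustration (a hypothetical program under Miri, not part of this module):
//
//   // thread 1: DATA = 42; FLAG.store(true, Release);
//   // thread 2: if FLAG.load(Acquire) { read DATA; }
//
// The flag accesses reach `write_scalar_atomic`/`read_scalar_atomic` above, which call
// `validate_atomic_store`/`validate_atomic_load`; the Release store publishes thread 1's clock
// through the cell's `sync_vector`, the Acquire load joins it into thread 2's clock, and the
// non-atomic accesses to DATA are then ordered and reported race-free.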

/// Vector-clock state for an allocation.
#[derive(Debug, Clone)]
pub struct VClockAlloc {
    /// Range map assigning a `MemoryCellClocks` to each range of bytes.
    alloc_ranges: RefCell<RangeMap<MemoryCellClocks>>,
}

impl VisitProvenance for VClockAlloc {
    fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {
        // No pointers with provenance are stored here.
    }
}

impl VClockAlloc {
    /// Create a new data-race detector for newly allocated memory.
    pub fn new_allocation(
        global: &GlobalState,
        thread_mgr: &ThreadManager<'_>,
        len: Size,
        kind: MemoryKind,
        current_span: Span,
    ) -> VClockAlloc {
        // Determine which thread did the allocation, and when.
        let (alloc_timestamp, alloc_index) = match kind {
            // User-allocated and stack memory is allocated by the active thread at the
            // current timestamp.
            MemoryKind::Machine(
                MiriMemoryKind::Rust
                | MiriMemoryKind::Miri
                | MiriMemoryKind::C
                | MiriMemoryKind::WinHeap
                | MiriMemoryKind::WinLocal
                | MiriMemoryKind::Mmap,
            )
            | MemoryKind::Stack => {
                let (alloc_index, clocks) = global.active_thread_state(thread_mgr);
                let mut alloc_timestamp = clocks.clock[alloc_index];
                alloc_timestamp.span = current_span;
                (alloc_timestamp, alloc_index)
            }
            // Global and machine-internal memory is treated as allocated by the main thread
            // at the zero timestamp, conceptually before everything else.
            MemoryKind::Machine(
                MiriMemoryKind::Global
                | MiriMemoryKind::Machine
                | MiriMemoryKind::Runtime
                | MiriMemoryKind::ExternStatic
                | MiriMemoryKind::Tls,
            )
            | MemoryKind::CallerLocation =>
                (VTimestamp::ZERO, global.thread_index(ThreadId::MAIN_THREAD)),
        };
        VClockAlloc {
            alloc_ranges: RefCell::new(RangeMap::new(
                len,
                MemoryCellClocks::new(alloc_timestamp, alloc_index),
            )),
        }
    }

    /// Find an index, if one exists, where the value in `l` is greater than the value in `r`.
    fn find_gt_index(l: &VClock, r: &VClock) -> Option<VectorIdx> {
        trace!("Find index where not {:?} <= {:?}", l, r);
        let l_slice = l.as_slice();
        let r_slice = r.as_slice();
        l_slice
            .iter()
            .zip(r_slice.iter())
            .enumerate()
            .find_map(|(idx, (&l, &r))| if l > r { Some(idx) } else { None })
            .or_else(|| {
                if l_slice.len() > r_slice.len() {
                    // By the VClock invariant, the trailing elements of the longer slice
                    // must contain at least one non-zero timestamp.
                    let l_remainder_slice = &l_slice[r_slice.len()..];
                    let idx = l_remainder_slice
                        .iter()
                        .enumerate()
                        .find_map(|(idx, &r)| if r == VTimestamp::ZERO { None } else { Some(idx) })
                        .expect("Invalid VClock Invariant");
                    Some(idx + r_slice.len())
                } else {
                    None
                }
            })
            .map(VectorIdx::new)
    }

    /// Report a data race found in the program: identify the two racing accesses, the
    /// threads involved, and the memory location. The `ty` parameter is only used to
    /// improve the diagnostic message.
    #[cold]
    #[inline(never)]
    fn report_data_race<'tcx>(
        global: &GlobalState,
        thread_mgr: &ThreadManager<'_>,
        mem_clocks: &MemoryCellClocks,
        access: AccessType,
        access_size: Size,
        ptr_dbg: interpret::Pointer<AllocId>,
        ty: Option<Ty<'_>>,
    ) -> InterpResult<'tcx> {
        let (active_index, active_clocks) = global.active_thread_state(thread_mgr);
        // `Some` if this was a mixed-size race.
        let mut other_size = None;
        let write_clock;
        let (other_access, other_thread, other_clock) =
            // First check races between this non-atomic access and earlier atomic accesses.
            if !access.is_atomic() &&
                let Some(atomic) = mem_clocks.atomic() &&
                let Some(idx) = Self::find_gt_index(&atomic.write_vector, &active_clocks.clock)
            {
                (AccessType::AtomicStore, idx, &atomic.write_vector)
            } else if !access.is_atomic() &&
                let Some(atomic) = mem_clocks.atomic() &&
                let Some(idx) = Self::find_gt_index(&atomic.read_vector, &active_clocks.clock)
            {
                (AccessType::AtomicLoad, idx, &atomic.read_vector)
            // Then races with earlier non-atomic writes and reads.
            } else if mem_clocks.write.1 > active_clocks.clock[mem_clocks.write.0] {
                write_clock = mem_clocks.write();
                (AccessType::NaWrite(mem_clocks.write_type), mem_clocks.write.0, &write_clock)
            } else if let Some(idx) = Self::find_gt_index(&mem_clocks.read, &active_clocks.clock) {
                (AccessType::NaRead(mem_clocks.read[idx].read_type()), idx, &mem_clocks.read)
            // Finally, mixed-size atomic races.
            } else if access.is_atomic() && let Some(atomic) = mem_clocks.atomic()
                && atomic.size != Some(access_size)
            {
                // Find the atomic access we are not synchronized with.
                other_size = Some(atomic.size.unwrap_or(Size::ZERO));
                if let Some(idx) = Self::find_gt_index(&atomic.write_vector, &active_clocks.clock)
                {
                    (AccessType::AtomicStore, idx, &atomic.write_vector)
                } else if let Some(idx) =
                    Self::find_gt_index(&atomic.read_vector, &active_clocks.clock)
                {
                    (AccessType::AtomicLoad, idx, &atomic.read_vector)
                } else {
                    unreachable!(
                        "Failed to report data-race for mixed-size access: no race found"
                    )
                }
            } else {
                unreachable!("Failed to report data-race")
            };

        // Load thread information about the racing thread actions.
        let active_thread_info = global.print_thread_metadata(thread_mgr, active_index);
        let other_thread_info = global.print_thread_metadata(thread_mgr, other_thread);
        let involves_non_atomic = !access.is_atomic() || !other_access.is_atomic();

        // Throw the data-race detection error.
        let extra = if other_size.is_some() {
            assert!(!involves_non_atomic);
            Some("overlapping unsynchronized atomic accesses must use the same access size")
        } else if access.is_read() && other_access.is_read() {
            panic!("there should be no same-size read-read races")
        } else {
            None
        };
        Err(err_machine_stop!(TerminationInfo::DataRace {
            involves_non_atomic,
            extra,
            retag_explain: access.is_retag() || other_access.is_retag(),
            ptr: ptr_dbg,
            op1: RacingOp {
                action: other_access.description(None, other_size),
                thread_info: other_thread_info,
                span: other_clock.as_slice()[other_thread.index()].span_data(),
            },
            op2: RacingOp {
                action: access.description(ty, other_size.map(|_| access_size)),
                thread_info: active_thread_info,
                span: active_clocks.clock.as_slice()[active_index.index()].span_data(),
            },
        }))?
    }

    /// Detect data races for an unsynchronized read operation. This is a no-op if
    /// `race_detecting()` is false, either because no extra threads have been created
    /// or because race detection is temporarily disabled (e.g. for the non-atomic part
    /// of an atomic read).
    pub fn read<'tcx>(
        &self,
        alloc_id: AllocId,
        access_range: AllocRange,
        read_type: NaReadType,
        ty: Option<Ty<'_>>,
        machine: &MiriMachine<'_>,
    ) -> InterpResult<'tcx> {
        let current_span = machine.current_span();
        let global = machine.data_race.as_vclocks_ref().unwrap();
        if !global.race_detecting() {
            return interp_ok(());
        }
        let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
        let mut alloc_ranges = self.alloc_ranges.borrow_mut();
        for (mem_clocks_range, mem_clocks) in
            alloc_ranges.iter_mut(access_range.start, access_range.size)
        {
            if let Err(DataRace) =
                mem_clocks.read_race_detect(&mut thread_clocks, index, read_type, current_span)
            {
                drop(thread_clocks);
                // Report the data race.
                return Self::report_data_race(
                    global,
                    &machine.threads,
                    mem_clocks,
                    AccessType::NaRead(read_type),
                    access_range.size,
                    interpret::Pointer::new(alloc_id, Size::from_bytes(mem_clocks_range.start)),
                    ty,
                );
            }
        }
        interp_ok(())
    }

    /// Detect data races for an unsynchronized write operation. This is a no-op if
    /// `race_detecting()` is false, see `read` above.
    pub fn write<'tcx>(
        &mut self,
        alloc_id: AllocId,
        access_range: AllocRange,
        write_type: NaWriteType,
        ty: Option<Ty<'_>>,
        machine: &mut MiriMachine<'_>,
    ) -> InterpResult<'tcx> {
        let current_span = machine.current_span();
        let global = machine.data_race.as_vclocks_mut().unwrap();
        if !global.race_detecting() {
            return interp_ok(());
        }
        let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
        for (mem_clocks_range, mem_clocks) in
            self.alloc_ranges.get_mut().iter_mut(access_range.start, access_range.size)
        {
            if let Err(DataRace) =
                mem_clocks.write_race_detect(&mut thread_clocks, index, write_type, current_span)
            {
                drop(thread_clocks);
                // Report the data race.
                return Self::report_data_race(
                    global,
                    &machine.threads,
                    mem_clocks,
                    AccessType::NaWrite(write_type),
                    access_range.size,
                    interpret::Pointer::new(alloc_id, Size::from_bytes(mem_clocks_range.start)),
                    ty,
                );
            }
        }
        interp_ok(())
    }
}

/// Per-frame state for race detection of locals that live outside of memory: such locals
/// cannot be accessed by other threads until they are moved into an allocation.
#[derive(Debug, Default)]
pub struct FrameState {
    local_clocks: RefCell<FxHashMap<mir::Local, LocalClocks>>,
}

/// Stripped-down version of `MemoryCellClocks` for locals: since locals are only accessed
/// by the thread owning the frame, a single timestamp per kind of access suffices.
#[derive(Debug)]
struct LocalClocks {
    write: VTimestamp,
    write_type: NaWriteType,
    read: VTimestamp,
}

impl Default for LocalClocks {
    fn default() -> Self {
        Self { write: VTimestamp::ZERO, write_type: NaWriteType::Allocate, read: VTimestamp::ZERO }
    }
}

impl FrameState {
    pub fn local_write(&self, local: mir::Local, storage_live: bool, machine: &MiriMachine<'_>) {
        let current_span = machine.current_span();
        let global = machine.data_race.as_vclocks_ref().unwrap();
        if !global.race_detecting() {
            return;
        }
        let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
        // Mirrors what `MemoryCellClocks::write_race_detect` does for memory.
        if !current_span.is_dummy() {
            thread_clocks.clock.index_mut(index).span = current_span;
        }
        let mut clocks = self.local_clocks.borrow_mut();
        if storage_live {
            let new_clocks = LocalClocks {
                write: thread_clocks.clock[index],
                write_type: NaWriteType::Allocate,
                read: VTimestamp::ZERO,
            };
            // An entry may already exist if the local was live before; it is simply replaced.
            clocks.insert(local, new_clocks);
        } else {
            // Update the local's write clock, creating a default entry if it was not tracked yet.
            let clocks = clocks.entry(local).or_default();
            clocks.write = thread_clocks.clock[index];
            clocks.write_type = NaWriteType::Write;
        }
    }

    pub fn local_read(&self, local: mir::Local, machine: &MiriMachine<'_>) {
        let current_span = machine.current_span();
        let global = machine.data_race.as_vclocks_ref().unwrap();
        if !global.race_detecting() {
            return;
        }
        let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
        // Mirrors what `MemoryCellClocks::read_race_detect` does for memory.
        if !current_span.is_dummy() {
            thread_clocks.clock.index_mut(index).span = current_span;
        }
        thread_clocks.clock.index_mut(index).set_read_type(NaReadType::Read);
        // Update the local's read clock, creating a default entry if it was not tracked yet.
        let mut clocks = self.local_clocks.borrow_mut();
        let clocks = clocks.entry(local).or_default();
        clocks.read = thread_clocks.clock[index];
    }

    pub fn local_moved_to_memory(
        &self,
        local: mir::Local,
        alloc: &mut VClockAlloc,
        machine: &MiriMachine<'_>,
    ) {
        let global = machine.data_race.as_vclocks_ref().unwrap();
        if !global.race_detecting() {
            return;
        }
        let (index, _thread_clocks) = global.active_thread_state_mut(&machine.threads);
        // Take the local's clocks (or defaults if it was never tracked).
        let local_clocks = self.local_clocks.borrow_mut().remove(&local).unwrap_or_default();
        for (_mem_clocks_range, mem_clocks) in alloc.alloc_ranges.get_mut().iter_mut_all() {
            // The initialization write for this allocation already happened, just at the wrong
            // timestamp; check that the thread index matches and backdate it.
            assert_eq!(mem_clocks.write.0, index);
            mem_clocks.write = (index, local_clocks.write);
            mem_clocks.write_type = local_clocks.write_type;
            mem_clocks.read = VClock::new_with_index(index, local_clocks.read);
        }
    }
}
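
// For instance, a `let mut x = 0;` that later has its address taken first has its accesses
// tracked in `FrameState::local_clocks`; when it is moved into real memory,
// `local_moved_to_memory` copies those timestamps into the new allocation's
// `MemoryCellClocks`, so earlier unsynchronized accesses still participate in race detection.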

impl<'tcx> EvalContextPrivExt<'tcx> for MiriInterpCx<'tcx> {}
trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> {
    /// Temporarily allow data races to occur. This should only be used when one of the
    /// `validate_atomic_*` functions will separately handle the access as atomic, when the
    /// accessed memory is internal interpreter state that the program cannot observe, or
    /// when execution of the program is being halted.
    #[inline]
    fn allow_data_races_ref<R>(&self, op: impl FnOnce(&MiriInterpCx<'tcx>) -> R) -> R {
        let this = self.eval_context_ref();
        this.machine.data_race.set_ongoing_action_data_race_free(true);
        let result = op(this);
        this.machine.data_race.set_ongoing_action_data_race_free(false);
        result
    }

    /// Same as `allow_data_races_ref`, but with a mutable interpreter context.
    #[inline]
    fn allow_data_races_mut<R>(&mut self, op: impl FnOnce(&mut MiriInterpCx<'tcx>) -> R) -> R {
        let this = self.eval_context_mut();
        this.machine.data_race.set_ongoing_action_data_race_free(true);
        let result = op(this);
        this.machine.data_race.set_ongoing_action_data_race_free(false);
        result
    }

    /// Checks that an atomic access is legal at the given place.
    fn atomic_access_check(
        &self,
        place: &MPlaceTy<'tcx>,
        access_type: AtomicAccessType,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_ref();
        // Atomics must be aligned to their size, even if the type they wrap would be
        // less aligned.
        let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
        this.check_ptr_align(place.ptr(), align)?;
        // Check mutability of the allocation: several kinds of atomic accesses have to be
        // implemented with operations that require writeable memory on real targets, so they
        // are rejected on read-only memory.
        let (alloc_id, _offset, _prov) = this
            .ptr_try_get_alloc_id(place.ptr(), 0)
            .expect("there are no zero-sized atomic accesses");
        if this.get_alloc_mutability(alloc_id)? == Mutability::Not {
            match access_type {
                AtomicAccessType::Rmw | AtomicAccessType::Store => {
                    throw_ub_format!(
                        "atomic store and read-modify-write operations cannot be performed on read-only memory\n\
                        see <https://doc.rust-lang.org/nightly/std/sync/atomic/index.html#atomic-accesses-to-read-only-memory> for more information"
                    );
                }
                AtomicAccessType::Load(_)
                    if place.layout.size > this.tcx.data_layout().pointer_size() =>
                {
                    throw_ub_format!(
                        "large atomic load operations cannot be performed on read-only memory\n\
                        these operations often have to be implemented using read-modify-write operations, which require writeable memory\n\
                        see <https://doc.rust-lang.org/nightly/std/sync/atomic/index.html#atomic-accesses-to-read-only-memory> for more information"
                    );
                }
                AtomicAccessType::Load(o) if o != AtomicReadOrd::Relaxed => {
                    throw_ub_format!(
                        "non-relaxed atomic load operations cannot be performed on read-only memory\n\
                        these operations sometimes have to be implemented using read-modify-write operations, which require writeable memory\n\
                        see <https://doc.rust-lang.org/nightly/std/sync/atomic/index.html#atomic-accesses-to-read-only-memory> for more information"
                    );
                }
                _ => {
                    // Relaxed loads of at most pointer size are fine on read-only memory.
                }
            }
        }
        interp_ok(())
    }

    /// Update the data-race detector for an atomic read occurring at the
    /// associated memory place on the current thread.
    fn validate_atomic_load(
        &self,
        place: &MPlaceTy<'tcx>,
        atomic: AtomicReadOrd,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_ref();
        this.validate_atomic_op(
            place,
            atomic,
            AccessType::AtomicLoad,
            move |memory, clocks, index, atomic| {
                if atomic == AtomicReadOrd::Relaxed {
                    memory.load_relaxed(&mut *clocks, index, place.layout.size)
                } else {
                    memory.load_acquire(&mut *clocks, index, place.layout.size)
                }
            },
        )
    }

    /// Update the data-race detector for an atomic write occurring at the
    /// associated memory place on the current thread.
    fn validate_atomic_store(
        &mut self,
        place: &MPlaceTy<'tcx>,
        atomic: AtomicWriteOrd,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        this.validate_atomic_op(
            place,
            atomic,
            AccessType::AtomicStore,
            move |memory, clocks, index, atomic| {
                if atomic == AtomicWriteOrd::Relaxed {
                    memory.store_relaxed(clocks, index, place.layout.size)
                } else {
                    memory.store_release(clocks, index, place.layout.size)
                }
            },
        )
    }

    /// Update the data-race detector for an atomic read-modify-write occurring at the
    /// associated memory place on the current thread. The RMW is decomposed into its
    /// acquiring (load) and releasing (store) halves.
    fn validate_atomic_rmw(
        &mut self,
        place: &MPlaceTy<'tcx>,
        atomic: AtomicRwOrd,
    ) -> InterpResult<'tcx> {
        use AtomicRwOrd::*;
        let acquire = matches!(atomic, Acquire | AcqRel | SeqCst);
        let release = matches!(atomic, Release | AcqRel | SeqCst);
        let this = self.eval_context_mut();
        this.validate_atomic_op(
            place,
            atomic,
            AccessType::AtomicRmw,
            move |memory, clocks, index, _| {
                if acquire {
                    memory.load_acquire(clocks, index, place.layout.size)?;
                } else {
                    memory.load_relaxed(clocks, index, place.layout.size)?;
                }
                if release {
                    memory.rmw_release(clocks, index, place.layout.size)
                } else {
                    memory.rmw_relaxed(clocks, index, place.layout.size)
                }
            },
        )
    }

    /// Generic atomic operation implementation: runs `op` on the clocks of every affected
    /// memory cell and reports a data race if `op` detects one.
    fn validate_atomic_op<A: Debug + Copy>(
        &self,
        place: &MPlaceTy<'tcx>,
        atomic: A,
        access: AccessType,
        mut op: impl FnMut(
            &mut MemoryCellClocks,
            &mut ThreadClockSet,
            VectorIdx,
            A,
        ) -> Result<(), DataRace>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_ref();
        assert!(access.is_atomic());
        let Some(data_race) = this.machine.data_race.as_vclocks_ref() else {
            return interp_ok(());
        };
        if !data_race.race_detecting() {
            return interp_ok(());
        }
        let size = place.layout.size;
        let (alloc_id, base_offset, _prov) = this.ptr_get_alloc_id(place.ptr(), 0)?;
        // Atomic loads are possible even on read-only allocations, so `get_alloc_extra_mut`
        // is not an option here.
        let alloc_meta = this.get_alloc_extra(alloc_id)?.data_race.as_vclocks_ref().unwrap();
        trace!(
            "Atomic op({}) with ordering {:?} on {:?} (size={})",
            access.description(None, None),
            &atomic,
            place.ptr(),
            size.bytes()
        );

        let current_span = this.machine.current_span();
        // Perform the atomic operation.
        data_race.maybe_perform_sync_operation(
            &this.machine.threads,
            current_span,
            |index, mut thread_clocks| {
                for (mem_clocks_range, mem_clocks) in
                    alloc_meta.alloc_ranges.borrow_mut().iter_mut(base_offset, size)
                {
                    if let Err(DataRace) = op(mem_clocks, &mut thread_clocks, index, atomic) {
                        mem::drop(thread_clocks);
                        return VClockAlloc::report_data_race(
                            data_race,
                            &this.machine.threads,
                            mem_clocks,
                            access,
                            place.layout.size,
                            interpret::Pointer::new(
                                alloc_id,
                                Size::from_bytes(mem_clocks_range.start),
                            ),
                            None,
                        )
                        .map(|_| true);
                    }
                }

                // Conservatively treat every atomic operation as if it had release semantics,
                // so the thread clock is incremented afterwards.
                interp_ok(true)
            },
        )?;

        // Log changes to atomic memory.
        if tracing::enabled!(tracing::Level::TRACE) {
            for (_offset, mem_clocks) in alloc_meta.alloc_ranges.borrow().iter(base_offset, size) {
                trace!(
                    "Updated atomic memory({:?}, size={}) to {:#?}",
                    place.ptr(),
                    size.bytes(),
                    mem_clocks.atomic_ops
                );
            }
        }

        interp_ok(())
    }
}

impl GlobalState {
    /// Create a new global state; the main thread is pre-registered at the first vector index.
    pub fn new(config: &MiriConfig) -> Self {
        let mut global_state = GlobalState {
            multi_threaded: Cell::new(false),
            ongoing_action_data_race_free: Cell::new(false),
            vector_clocks: RefCell::new(IndexVec::new()),
            vector_info: RefCell::new(IndexVec::new()),
            thread_info: RefCell::new(IndexVec::new()),
            reuse_candidates: RefCell::new(FxHashSet::default()),
            last_sc_fence: RefCell::new(VClock::default()),
            last_sc_write_per_thread: RefCell::new(VClock::default()),
            track_outdated_loads: config.track_outdated_loads,
            weak_memory: config.weak_memory_emulation,
        };

        // Set up the main thread, since it is not explicitly created.
        let index = global_state.vector_clocks.get_mut().push(ThreadClockSet::default());
        global_state.vector_info.get_mut().push(ThreadId::MAIN_THREAD);
        global_state
            .thread_info
            .get_mut()
            .push(ThreadExtraState { vector_index: Some(index), termination_vector_clock: None });

        global_state
    }

    /// Data-race detection is performed only when more than one thread is active and race
    /// detection has not been temporarily disabled for a known race-free action.
    fn race_detecting(&self) -> bool {
        self.multi_threaded.get() && !self.ongoing_action_data_race_free.get()
    }

    pub fn ongoing_action_data_race_free(&self) -> bool {
        self.ongoing_action_data_race_free.get()
    }

    /// Try to find a vector index that can be reused by a new thread instead of
    /// growing the vector clocks.
    fn find_vector_index_reuse_candidate(&self) -> Option<VectorIdx> {
        let mut reuse = self.reuse_candidates.borrow_mut();
        let vector_clocks = self.vector_clocks.borrow();
        for &candidate in reuse.iter() {
            let target_timestamp = vector_clocks[candidate].clock[candidate];
            if vector_clocks.iter_enumerated().all(|(clock_idx, clock)| {
                // This thread has already synchronized with the candidate's final timestamp,
                // so it can never report a race against the candidate index.
                let no_data_race = clock.clock[candidate] >= target_timestamp;

                // This index is itself a reuse candidate, i.e. its thread has terminated.
                let vector_terminated = reuse.contains(&clock_idx);

                no_data_race || vector_terminated
            }) {
                // No live thread can still race with the candidate index,
                // so it can be reused safely.
                assert!(reuse.remove(&candidate));
                return Some(candidate);
            }
        }
        None
    }
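
    // For example, a program that repeatedly spawns a thread and joins it will keep reusing a
    // single vector index (once every other thread has synchronized with the terminated
    // thread's last event), so vector clocks stay short instead of growing with every spawn.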

    /// Prepare tracking for a newly created thread: assign it a vector index
    /// (reusing one if possible) and establish the spawn happens-before edge.
    #[inline]
    pub fn thread_created(
        &mut self,
        thread_mgr: &ThreadManager<'_>,
        thread: ThreadId,
        current_span: Span,
    ) {
        let current_index = self.active_thread_index(thread_mgr);

        // Enable multi-threaded execution: there are now at least two threads,
        // so data races are possible.
        self.multi_threaded.set(true);

        // Load and set up the associated thread metadata.
        let mut thread_info = self.thread_info.borrow_mut();
        thread_info.ensure_contains_elem(thread, Default::default);

        // Assign a vector index for the thread, reusing an old index that can no longer
        // report any data races if possible.
        let created_index = if let Some(reuse_index) = self.find_vector_index_reuse_candidate() {
            // Reconfigure the reuse candidate and increment its clock for the new use.
            let vector_clocks = self.vector_clocks.get_mut();
            vector_clocks[reuse_index].increment_clock(reuse_index, current_span);

            // Re-associate the vector index with the new thread.
            let vector_info = self.vector_info.get_mut();
            let old_thread = vector_info[reuse_index];
            vector_info[reuse_index] = thread;

            // The old thread no longer owns a vector index.
            thread_info[old_thread].vector_index = None;

            reuse_index
        } else {
            // No reuse candidate available: create a new vector index.
            let vector_info = self.vector_info.get_mut();
            vector_info.push(thread)
        };

        trace!("Creating thread = {:?} with vector index = {:?}", thread, created_index);

        // Mark the chosen vector index as in use by the thread.
        thread_info[thread].vector_index = Some(created_index);

        // Create a thread clock set if needed.
        let vector_clocks = self.vector_clocks.get_mut();
        if created_index == vector_clocks.next_index() {
            vector_clocks.push(ThreadClockSet::default());
        }

        // Load the two clocks and configure the initial state.
        let (current, created) = vector_clocks.pick2_mut(current_index, created_index);

        // Join the created thread with the current one, since the spawning thread's
        // previous actions happen-before the created thread.
        created.join_with(current);

        // Advance both threads after the synchronizing operation.
        current.increment_clock(current_index, current_span);
        created.increment_clock(created_index, current_span);
    }

    /// Hook for a thread join: establishes the happens-before relation between the
    /// joined thread's termination and the current thread.
    #[inline]
    pub fn thread_joined(&mut self, threads: &ThreadManager<'_>, joinee: ThreadId) {
        let thread_info = self.thread_info.borrow();
        let thread_info = &thread_info[joinee];

        // Acquire the joined thread's termination clock: everything the joinee did
        // happens-before the join.
        let join_clock = thread_info
            .termination_vector_clock
            .as_ref()
            .expect("joined with thread but thread has not terminated");
        self.acquire_clock(join_clock, threads);

        // If only one thread remains live, check whether every thread's last event is already
        // ordered before this clock; if so, no further races are possible until a new thread
        // is spawned, so multi-threaded mode can be disabled again.
        if let Some(current_index) = thread_info.vector_index {
            if threads.get_live_thread_count() == 1 {
                let vector_clocks = self.vector_clocks.get_mut();
                let current_clock = &vector_clocks[current_index];
                if vector_clocks
                    .iter_enumerated()
                    .all(|(idx, clocks)| clocks.clock[idx] <= current_clock.clock[idx])
                {
                    self.multi_threaded.set(false);
                }
            }
        }
    }

    /// Hook for thread termination: record the terminating thread's clock so that joining
    /// threads can acquire it, and mark its vector index as a candidate for reuse.
    #[inline]
    pub fn thread_terminated(&mut self, thread_mgr: &ThreadManager<'_>) {
        let current_thread = thread_mgr.active_thread();
        let current_index = self.active_thread_index(thread_mgr);

        // Store the terminating thread's clock for later joins.
        let termination_clock = self.release_clock(thread_mgr, |clock| clock.clone());
        self.thread_info.get_mut()[current_thread].termination_vector_clock =
            Some(termination_clock);

        // Add this thread's vector index as a candidate for reuse.
        let reuse = self.reuse_candidates.get_mut();
        reuse.insert(current_index);
    }

    /// Update the data-race detector for an atomic fence on the current thread.
    fn atomic_fence<'tcx>(
        &self,
        machine: &MiriMachine<'tcx>,
        atomic: AtomicFenceOrd,
    ) -> InterpResult<'tcx> {
        let current_span = machine.current_span();
        self.maybe_perform_sync_operation(&machine.threads, current_span, |index, mut clocks| {
            trace!("Atomic fence on {:?} with ordering {:?}", index, atomic);

            // AcqRel and SeqCst are treated as an acquire and a release fence applied
            // at the same timestamp.
            if atomic != AtomicFenceOrd::Release {
                // Acquire, AcqRel or SeqCst.
                clocks.apply_acquire_fence();
            }
            if atomic == AtomicFenceOrd::SeqCst {
                // An SC fence is ordered with all previous SC fences: join the global SC fence
                // clock with this thread's clock in both directions.
                let mut sc_fence_clock = self.last_sc_fence.borrow_mut();
                sc_fence_clock.join(&clocks.clock);
                clocks.clock.join(&sc_fence_clock);
                // Also join with the last SC write of every thread; `write_seqcst` is consulted
                // by the weak-memory emulation when deciding which buffered values later reads
                // may observe.
                clocks.write_seqcst.join(&self.last_sc_write_per_thread.borrow());
            }
            if atomic != AtomicFenceOrd::Acquire {
                // Release, AcqRel or SeqCst.
                clocks.apply_release_fence();
            }

            // Increment the timestamp if the fence had release semantics.
            interp_ok(atomic != AtomicFenceOrd::Acquire)
        })
    }
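
    // Illustration (informal, store-buffering pattern):
    //
    //   thread A: X.store(1, Relaxed); fence(SeqCst); let r1 = Y.load(Relaxed);
    //   thread B: Y.store(1, Relaxed); fence(SeqCst); let r2 = X.load(Relaxed);
    //
    // Joining through `last_sc_fence` totally orders the two fences, which (together with the
    // SC clocks consulted by the weak-memory emulation) is intended to rule out the outcome
    // where both loads return 0, an outcome that plain release/acquire would still allow.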

    /// Attempt to perform a synchronized operation: this only runs the closure if
    /// multi-threaded mode is enabled, and increments the current thread's clock afterwards
    /// if the closure returns `true` (i.e. if it released anything).
    fn maybe_perform_sync_operation<'tcx>(
        &self,
        thread_mgr: &ThreadManager<'_>,
        current_span: Span,
        op: impl FnOnce(VectorIdx, RefMut<'_, ThreadClockSet>) -> InterpResult<'tcx, bool>,
    ) -> InterpResult<'tcx> {
        if self.multi_threaded.get() {
            let (index, clocks) = self.active_thread_state_mut(thread_mgr);
            if op(index, clocks)? {
                let (_, mut clocks) = self.active_thread_state_mut(thread_mgr);
                clocks.increment_clock(index, current_span);
            }
        }
        interp_ok(())
    }

    /// Render a short human-readable description of the thread occupying the given
    /// vector index, for diagnostics.
    fn print_thread_metadata(&self, thread_mgr: &ThreadManager<'_>, vector: VectorIdx) -> String {
        let thread = self.vector_info.borrow()[vector];
        let thread_name = thread_mgr.get_thread_display_name(thread);
        format!("thread `{thread_name}`")
    }

    /// Acquire the given clock into the current thread, establishing synchronization with
    /// the point at which that clock snapshot was released. As this is an acquire operation,
    /// the thread's timestamp is not incremented.
    pub fn acquire_clock<'tcx>(&self, clock: &VClock, threads: &ThreadManager<'tcx>) {
        let thread = threads.active_thread();
        let (_, mut clocks) = self.thread_state_mut(thread);
        clocks.clock.join(clock);
    }

    /// Calls the given callback with the "release" clock of the current thread.
    /// Other threads can acquire this clock in the future to establish synchronization
    /// with this program point.
    pub fn release_clock<'tcx, R>(
        &self,
        threads: &ThreadManager<'tcx>,
        callback: impl FnOnce(&VClock) -> R,
    ) -> R {
        let thread = threads.active_thread();
        let span = threads.active_thread_ref().current_span();
        let (index, mut clocks) = self.thread_state_mut(thread);
        let r = callback(&clocks.clock);
        // Increment the clock so that later events cannot be confused with anything that
        // happened before the release; the callback sees the clock *before* this increment.
        clocks.increment_clock(index, span);

        r
    }
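
    // These two functions are how the synchronization primitives modelled elsewhere in Miri
    // (mutexes, channels, thread join, and so on) feed into race detection: an unlock or send
    // calls `release_clock` and stores the snapshot in the primitive, and the corresponding
    // lock or receive calls `acquire_clock` with that snapshot, creating the happens-before
    // edge between the two program points.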

    fn thread_index(&self, thread: ThreadId) -> VectorIdx {
        self.thread_info.borrow()[thread].vector_index.expect("thread has no assigned vector")
    }

    /// Load the vector index used by the given thread as well as its set of thread clocks,
    /// mutably for modification.
    #[inline]
    fn thread_state_mut(&self, thread: ThreadId) -> (VectorIdx, RefMut<'_, ThreadClockSet>) {
        let index = self.thread_index(thread);
        let ref_vector = self.vector_clocks.borrow_mut();
        let clocks = RefMut::map(ref_vector, |vec| &mut vec[index]);
        (index, clocks)
    }

    /// Load the vector index used by the given thread as well as its set of thread clocks.
    #[inline]
    fn thread_state(&self, thread: ThreadId) -> (VectorIdx, Ref<'_, ThreadClockSet>) {
        let index = self.thread_index(thread);
        let ref_vector = self.vector_clocks.borrow();
        let clocks = Ref::map(ref_vector, |vec| &vec[index]);
        (index, clocks)
    }

    /// Load the vector index and thread clocks of the currently active thread.
    #[inline]
    pub(super) fn active_thread_state(
        &self,
        thread_mgr: &ThreadManager<'_>,
    ) -> (VectorIdx, Ref<'_, ThreadClockSet>) {
        self.thread_state(thread_mgr.active_thread())
    }

    /// Load the vector index and thread clocks of the currently active thread,
    /// mutably for modification.
    #[inline]
    pub(super) fn active_thread_state_mut(
        &self,
        thread_mgr: &ThreadManager<'_>,
    ) -> (VectorIdx, RefMut<'_, ThreadClockSet>) {
        self.thread_state_mut(thread_mgr.active_thread())
    }

    /// Return the vector index of the currently active thread.
    #[inline]
    fn active_thread_index(&self, thread_mgr: &ThreadManager<'_>) -> VectorIdx {
        let active_thread_id = thread_mgr.active_thread();
        self.thread_index(active_thread_id)
    }

    /// Record an SC write by the active thread in `last_sc_write_per_thread`.
    pub(super) fn sc_write(&self, thread_mgr: &ThreadManager<'_>) {
        let (index, clocks) = self.active_thread_state(thread_mgr);
        self.last_sc_write_per_thread.borrow_mut().set_at_index(&clocks.clock, index);
    }

    /// Record an SC read by the active thread: its `read_seqcst` clock acquires the
    /// global SC fence clock.
    pub(super) fn sc_read(&self, thread_mgr: &ThreadManager<'_>) {
        let (.., mut clocks) = self.active_thread_state_mut(thread_mgr);
        clocks.read_seqcst.join(&self.last_sc_fence.borrow());
    }
}