use std::cell::RefCell;
use std::collections::VecDeque;
use std::collections::hash_map::Entry;
use std::default::Default;
use std::ops::Not;
use std::rc::Rc;
use std::time::Duration;

use rustc_abi::Size;
use rustc_data_structures::fx::FxHashMap;

use super::vector_clock::VClock;
use crate::*;

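/// The mutex state.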
#[derive(Default, Debug)]
struct Mutex {
    /// The thread that currently owns the lock.
    owner: Option<ThreadId>,
    /// How many times the owner thread has locked this mutex (to support reentrant locking).
    lock_count: usize,
    /// The queue of threads waiting for this mutex.
    queue: VecDeque<ThreadId>,
    /// Mutex clock. This tracks the moment of the last unlock.
    clock: VClock,
}

#[derive(Default, Clone, Debug)]
pub struct MutexRef(Rc<RefCell<Mutex>>);

impl MutexRef {
    pub fn new() -> Self {
        Self(Default::default())
    }

    /// Get the thread that currently owns this mutex, if any.
    pub fn owner(&self) -> Option<ThreadId> {
        self.0.borrow().owner
    }
}

impl VisitProvenance for MutexRef {
    fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {
        // `Mutex` contains no provenance, so there is nothing to visit.
    }
}

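/// The read-write lock state.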
#[derive(Default, Debug)]
struct RwLock {
    /// The writer thread that currently owns the lock.
    writer: Option<ThreadId>,
    /// The readers that currently own the lock and how many times they acquired it.
    readers: FxHashMap<ThreadId, usize>,
    /// The queue of writer threads waiting for this lock.
    writer_queue: VecDeque<ThreadId>,
    /// The queue of reader threads waiting for this lock.
    reader_queue: VecDeque<ThreadId>,
    /// Data race clock for the unlocked state: the clock of the last write unlock,
    /// or the joined clocks of the last set of readers to release the lock.
    clock_unlocked: VClock,
    /// Data race clock accumulating the clocks of concurrent readers, so that
    /// their joined clock can be handed over once the last reader unlocks.
    clock_current_readers: VClock,
}

impl RwLock {
    #[inline]
    fn is_locked(&self) -> bool {
        trace!(
            "rwlock_is_locked: writer is {:?} and there are {} reader threads (some of which could hold multiple read locks)",
            self.writer,
            self.readers.len(),
        );
        self.writer.is_some() || self.readers.is_empty().not()
    }

    #[inline]
    fn is_write_locked(&self) -> bool {
        trace!("rwlock_is_write_locked: writer is {:?}", self.writer);
        self.writer.is_some()
    }
}

#[derive(Default, Clone, Debug)]
pub struct RwLockRef(Rc<RefCell<RwLock>>);

impl RwLockRef {
    pub fn new() -> Self {
        Self(Default::default())
    }

    pub fn is_locked(&self) -> bool {
        self.0.borrow().is_locked()
    }

    pub fn is_write_locked(&self) -> bool {
        self.0.borrow().is_write_locked()
    }
}

impl VisitProvenance for RwLockRef {
    fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {
        // `RwLock` contains no provenance, so there is nothing to visit.
    }
}

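/// The conditional variable state.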
#[derive(Default, Debug)]
struct Condvar {
    waiters: VecDeque<ThreadId>,
    /// Tracks the happens-before relationship between a condvar signal and the
    /// corresponding wait: contains the clock of the last thread to signal.
    clock: VClock,
}

#[derive(Default, Clone, Debug)]
pub struct CondvarRef(Rc<RefCell<Condvar>>);

impl CondvarRef {
    pub fn new() -> Self {
        Self(Default::default())
    }

    pub fn is_awaited(&self) -> bool {
        !self.0.borrow().waiters.is_empty()
    }
}

impl VisitProvenance for CondvarRef {
    fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {
        // `Condvar` contains no provenance, so there is nothing to visit.
    }
}

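/// The futex state.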
#[derive(Default, Debug)]
struct Futex {
    waiters: Vec<FutexWaiter>,
    /// Tracks the happens-before relationship between a futex-wake and a
    /// futex-wait during a non-spurious wake: contains the clock of the last
    /// thread to perform a futex-wake.
    clock: VClock,
}

#[derive(Default, Clone, Debug)]
pub struct FutexRef(Rc<RefCell<Futex>>);

impl FutexRef {
    pub fn new() -> Self {
        Self(Default::default())
    }

    pub fn waiters(&self) -> usize {
        self.0.borrow().waiters.len()
    }
}

impl VisitProvenance for FutexRef {
    fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {
        // `Futex` contains no provenance, so there is nothing to visit.
    }
}

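/// A thread waiting on a futex.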
#[derive(Debug)]
struct FutexWaiter {
    /// The thread that is waiting on this futex.
    thread: ThreadId,
    /// The bitset used to filter wake-ups, as in the `FUTEX_*_BITSET` operations.
    bitset: u32,
}

impl<'tcx> EvalContextExtPriv<'tcx> for crate::MiriInterpCx<'tcx> {}
pub(super) trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
    fn condvar_reacquire_mutex(
        &mut self,
        mutex_ref: MutexRef,
        retval: Scalar,
        dest: MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        if let Some(owner) = mutex_ref.owner() {
            assert_ne!(owner, this.active_thread());
            this.mutex_enqueue_and_block(mutex_ref, Some((retval, dest)));
        } else {
            // We can have it right now!
            this.mutex_lock(&mutex_ref);
            // Don't forget to write the return value.
            this.write_scalar(retval, &dest)?;
        }
        interp_ok(())
    }
}

impl<'tcx> AllocExtra<'tcx> {
    fn get_sync<T: 'static>(&self, offset: Size) -> Option<&T> {
        self.sync.get(&offset).and_then(|s| s.downcast_ref::<T>())
    }
}

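/// We designate an `init` field in all lazily initialized primitives.
/// If it is set to this cookie value, we consider the primitive initialized.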
pub const LAZY_INIT_COOKIE: u32 = 0xcafe_affe;

// Public interface to synchronization primitives. Please note that in most
// cases, the function calls are infallible and it is the client's (shim
// implementation's) responsibility to detect and deal with erroneous
// situations.
impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
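    /// Helper for lazily initialized `alloc_extra.sync` data: this forces an
    /// immediate init and returns a reference to the stored data.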
    fn lazy_sync_init<'a, T: 'static>(
        &'a mut self,
        primitive: &MPlaceTy<'tcx>,
        init_offset: Size,
        data: T,
    ) -> InterpResult<'tcx, &'a T>
    where
        'tcx: 'a,
    {
        let this = self.eval_context_mut();

        let (alloc, offset, _) = this.ptr_get_alloc_id(primitive.ptr(), 0)?;
        let (alloc_extra, _machine) = this.get_alloc_extra_mut(alloc)?;
        alloc_extra.sync.insert(offset, Box::new(data));
        let init_field = primitive.offset(init_offset, this.machine.layouts.u32, this)?;
        // Mark this as initialized.
        this.write_scalar_atomic(
            Scalar::from_u32(LAZY_INIT_COOKIE),
            &init_field,
            AtomicWriteOrd::Relaxed,
        )?;
        interp_ok(this.get_alloc_extra(alloc)?.get_sync::<T>(offset).unwrap())
    }

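    /// Helper for lazily initialized `alloc_extra.sync` data: checks whether
    /// the primitive is initialized.
    /// - If yes, fetches the data from `alloc_extra.sync`, or calls `missing_data`
    ///   if the data is missing and stores the result.
    /// - Otherwise, calls `new_data` to initialize the primitive.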
    fn lazy_sync_get_data<'a, T: 'static>(
        &'a mut self,
        primitive: &MPlaceTy<'tcx>,
        init_offset: Size,
        missing_data: impl FnOnce() -> InterpResult<'tcx, T>,
        new_data: impl FnOnce(&mut MiriInterpCx<'tcx>) -> InterpResult<'tcx, T>,
    ) -> InterpResult<'tcx, &'a T>
    where
        'tcx: 'a,
    {
        let this = self.eval_context_mut();

        // Check if this is already initialized. This needs to be atomic because we can race
        // with another thread initializing, and it needs to be an RMW operation so that we
        // read the *latest* value. So we just try to replace the cookie with itself.
        let init_cookie = Scalar::from_u32(LAZY_INIT_COOKIE);
        let init_field = primitive.offset(init_offset, this.machine.layouts.u32, this)?;
        let (_init, success) = this
            .atomic_compare_exchange_scalar(
                &init_field,
                &ImmTy::from_scalar(init_cookie, this.machine.layouts.u32),
                init_cookie,
                AtomicRwOrd::Relaxed,
                AtomicReadOrd::Relaxed,
                false,
            )?
            .to_scalar_pair();

        if success.to_bool()? {
            // If it is initialized, the data must be found in the `sync` table,
            // or else `missing_data` has to supply it.
            let (alloc, offset, _) = this.ptr_get_alloc_id(primitive.ptr(), 0)?;
            let (alloc_extra, _machine) = this.get_alloc_extra_mut(alloc)?;
            if alloc_extra.get_sync::<T>(offset).is_none() {
                let data = missing_data()?;
                alloc_extra.sync.insert(offset, Box::new(data));
            }
            interp_ok(alloc_extra.get_sync::<T>(offset).unwrap())
        } else {
            let data = new_data(this)?;
            this.lazy_sync_init(primitive, init_offset, data)
        }
    }

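    /// Get the synchronization primitive associated with the given pointer,
    /// or initialize a new one.
    ///
    /// Returns `None` if this pointer does not point to at least 1 byte of
    /// mutable memory.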
    fn get_sync_or_init<'a, T: 'static>(
        &'a mut self,
        ptr: Pointer,
        new: impl FnOnce(&'a mut MiriMachine<'tcx>) -> T,
    ) -> Option<&'a T>
    where
        'tcx: 'a,
    {
        let this = self.eval_context_mut();
        if !this.ptr_try_get_alloc_id(ptr, 0).ok().is_some_and(|(alloc_id, offset, ..)| {
            let info = this.get_alloc_info(alloc_id);
            info.kind == AllocKind::LiveData && info.mutbl.is_mut() && offset < info.size
        }) {
            return None;
        }
        // This cannot fail now, due to the check above.
        let (alloc, offset, _) = this.ptr_get_alloc_id(ptr, 0).unwrap();
        // Due to borrow checker reasons, we have to do the lookup twice.
        let (alloc_extra, machine) = this.get_alloc_extra_mut(alloc).unwrap();
        if alloc_extra.get_sync::<T>(offset).is_none() {
            let new = new(machine);
            alloc_extra.sync.insert(offset, Box::new(new));
        }
        Some(alloc_extra.get_sync::<T>(offset).unwrap())
    }

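    /// Lock by setting the mutex owner and increasing the lock count.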
    fn mutex_lock(&mut self, mutex_ref: &MutexRef) {
        let this = self.eval_context_mut();
        let thread = this.active_thread();
        let mut mutex = mutex_ref.0.borrow_mut();
        if let Some(current_owner) = mutex.owner {
            assert_eq!(thread, current_owner, "mutex already locked by another thread");
            assert!(
                mutex.lock_count > 0,
                "invariant violation: lock_count == 0 iff the thread is unlocked"
            );
        } else {
            mutex.owner = Some(thread);
        }
        mutex.lock_count = mutex.lock_count.strict_add(1);
        if let Some(data_race) = this.machine.data_race.as_vclocks_ref() {
            data_race.acquire_clock(&mutex.clock, &this.machine.threads);
        }
    }

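    /// Try unlocking by decreasing the lock count. If the lock count reaches 0,
    /// release the lock and potentially give it to a new owner. Returns the old
    /// lock count, or `None` if the mutex was not locked by the current thread.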
    fn mutex_unlock(&mut self, mutex_ref: &MutexRef) -> InterpResult<'tcx, Option<usize>> {
        let this = self.eval_context_mut();
        let mut mutex = mutex_ref.0.borrow_mut();
        interp_ok(if let Some(current_owner) = mutex.owner {
            if current_owner != this.machine.threads.active_thread() {
                // Only the owner can unlock the mutex.
                return interp_ok(None);
            }
            let old_lock_count = mutex.lock_count;
            mutex.lock_count = old_lock_count.strict_sub(1);
            if mutex.lock_count == 0 {
                mutex.owner = None;
                // The mutex is completely unlocked. Try transferring ownership
                // to another thread.
                if let Some(data_race) = this.machine.data_race.as_vclocks_ref() {
                    data_race.release_clock(&this.machine.threads, |clock| {
                        mutex.clock.clone_from(clock)
                    });
                }
                let thread_id = mutex.queue.pop_front();
                // We must drop our mutex borrow before unblocking the thread,
                // since the unblock callback will borrow the mutex again.
                drop(mutex);
                if let Some(thread) = thread_id {
                    this.unblock_thread(thread, BlockReason::Mutex)?;
                }
            }
            Some(old_lock_count)
        } else {
            // Mutex was not locked at all.
            None
        })
    }

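    /// Put the thread into the queue waiting for the mutex and block it.
    /// Once the mutex becomes available, `retval_dest.0` (if provided) will be
    /// written to `retval_dest.1`.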
    #[inline]
    fn mutex_enqueue_and_block(
        &mut self,
        mutex_ref: MutexRef,
        retval_dest: Option<(Scalar, MPlaceTy<'tcx>)>,
    ) {
        let this = self.eval_context_mut();
        let thread = this.active_thread();
        let mut mutex = mutex_ref.0.borrow_mut();
        mutex.queue.push_back(thread);
        assert!(mutex.owner.is_some(), "queuing on unlocked mutex");
        drop(mutex);
        this.block_thread(
            BlockReason::Mutex,
            None,
            callback!(
                @capture<'tcx> {
                    mutex_ref: MutexRef,
                    retval_dest: Option<(Scalar, MPlaceTy<'tcx>)>,
                }
                |this, unblock: UnblockKind| {
                    assert_eq!(unblock, UnblockKind::Ready);

                    assert!(mutex_ref.owner().is_none());
                    this.mutex_lock(&mutex_ref);

                    if let Some((retval, dest)) = retval_dest {
                        this.write_scalar(retval, &dest)?;
                    }

                    interp_ok(())
                }
            ),
        );
    }

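    /// Read-lock the lock by adding the current thread to the list of threads
    /// that own this lock.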
    fn rwlock_reader_lock(&mut self, rwlock_ref: &RwLockRef) {
        let this = self.eval_context_mut();
        let thread = this.active_thread();
        trace!("rwlock_reader_lock: now also held (one more time) by {:?}", thread);
        let mut rwlock = rwlock_ref.0.borrow_mut();
        assert!(!rwlock.is_write_locked(), "the lock is write locked");
        let count = rwlock.readers.entry(thread).or_insert(0);
        *count = count.strict_add(1);
        if let Some(data_race) = this.machine.data_race.as_vclocks_ref() {
            data_race.acquire_clock(&rwlock.clock_unlocked, &this.machine.threads);
        }
    }

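    /// Try to read-unlock the lock for the current thread and potentially give
    /// the lock to a new owner. Returns `true` on success, or `false` if this
    /// thread did not hold a read lock.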
    fn rwlock_reader_unlock(&mut self, rwlock_ref: &RwLockRef) -> InterpResult<'tcx, bool> {
        let this = self.eval_context_mut();
        let thread = this.active_thread();
        let mut rwlock = rwlock_ref.0.borrow_mut();
        match rwlock.readers.entry(thread) {
            Entry::Occupied(mut entry) => {
                let count = entry.get_mut();
                assert!(*count > 0, "rwlock locked with count == 0");
                *count -= 1;
                if *count == 0 {
                    trace!("rwlock_reader_unlock: no longer held by {:?}", thread);
                    entry.remove();
                } else {
                    trace!("rwlock_reader_unlock: held one less time by {:?}", thread);
                }
            }
            // This thread did not hold a read lock.
            Entry::Vacant(_) => return interp_ok(false),
        }
        if let Some(data_race) = this.machine.data_race.as_vclocks_ref() {
            // Add this to the shared-release clock of all concurrent readers.
            data_race.release_clock(&this.machine.threads, |clock| {
                rwlock.clock_current_readers.join(clock)
            });
        }

        if rwlock.is_locked().not() {
            // All readers are finished: set the "unlocked" clock to the join of all
            // reader clocks, since the set of readers happens-before the next writer.
            let rwlock_ref = &mut *rwlock;
            rwlock_ref.clock_unlocked.clone_from(&rwlock_ref.clock_current_readers);
            // See if there is a writer to unblock.
            if let Some(writer) = rwlock_ref.writer_queue.pop_front() {
                // Make sure we do not have the RefCell borrowed while unblocking.
                drop(rwlock);
                this.unblock_thread(writer, BlockReason::RwLock)?;
            }
        }
        interp_ok(true)
    }

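    /// Put the reader in the queue waiting for the lock and block it.
    /// Once the lock becomes available, `retval` will be written to `dest`.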
    #[inline]
    fn rwlock_enqueue_and_block_reader(
        &mut self,
        rwlock_ref: RwLockRef,
        retval: Scalar,
        dest: MPlaceTy<'tcx>,
    ) {
        let this = self.eval_context_mut();
        let thread = this.active_thread();
        let mut rwlock = rwlock_ref.0.borrow_mut();
        rwlock.reader_queue.push_back(thread);
        assert!(rwlock.is_write_locked(), "read-queueing on not write locked rwlock");
        drop(rwlock);
        this.block_thread(
            BlockReason::RwLock,
            None,
            callback!(
                @capture<'tcx> {
                    rwlock_ref: RwLockRef,
                    retval: Scalar,
                    dest: MPlaceTy<'tcx>,
                }
                |this, unblock: UnblockKind| {
                    assert_eq!(unblock, UnblockKind::Ready);
                    this.rwlock_reader_lock(&rwlock_ref);
                    this.write_scalar(retval, &dest)?;
                    interp_ok(())
                }
            ),
        );
    }

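    /// Lock by setting the writer that owns the lock.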
    #[inline]
    fn rwlock_writer_lock(&mut self, rwlock_ref: &RwLockRef) {
        let this = self.eval_context_mut();
        let thread = this.active_thread();
        trace!("rwlock_writer_lock: now held by {:?}", thread);

        let mut rwlock = rwlock_ref.0.borrow_mut();
        assert!(!rwlock.is_locked(), "the rwlock is already locked");
        rwlock.writer = Some(thread);
        if let Some(data_race) = this.machine.data_race.as_vclocks_ref() {
            data_race.acquire_clock(&rwlock.clock_unlocked, &this.machine.threads);
        }
    }

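    /// Try to write-unlock the lock held by the current thread and potentially
    /// give the lock to a new owner. Returns `false` if the lock is held by a
    /// different thread (or not held at all).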
    #[inline]
    fn rwlock_writer_unlock(&mut self, rwlock_ref: &RwLockRef) -> InterpResult<'tcx, bool> {
        let this = self.eval_context_mut();
        let thread = this.active_thread();
        let mut rwlock = rwlock_ref.0.borrow_mut();
        interp_ok(if let Some(current_writer) = rwlock.writer {
            if current_writer != thread {
                // Only the owner can unlock the rwlock.
                return interp_ok(false);
            }
            rwlock.writer = None;
            trace!("rwlock_writer_unlock: unlocked by {:?}", thread);
            if let Some(data_race) = this.machine.data_race.as_vclocks_ref() {
                // Release memory to the next lock holder.
                data_race.release_clock(&this.machine.threads, |clock| {
                    rwlock.clock_unlocked.clone_from(clock)
                });
            }

            // We prioritize writers here against the readers. As a result,
            // not only can readers starve writers, writers can also starve
            // readers.
            if let Some(writer) = rwlock.writer_queue.pop_front() {
                // Make sure we do not have the RefCell borrowed while unblocking.
                drop(rwlock);
                this.unblock_thread(writer, BlockReason::RwLock)?;
            } else {
                // Take the entire read queue and wake them all up.
                let readers = std::mem::take(&mut rwlock.reader_queue);
                drop(rwlock);
                for reader in readers {
                    this.unblock_thread(reader, BlockReason::RwLock)?;
                }
            }
            true
        } else {
            false
        })
    }

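    /// Put the writer in the queue waiting for the lock and block it.
    /// Once the lock becomes available, `retval` will be written to `dest`.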
    #[inline]
    fn rwlock_enqueue_and_block_writer(
        &mut self,
        rwlock_ref: RwLockRef,
        retval: Scalar,
        dest: MPlaceTy<'tcx>,
    ) {
        let this = self.eval_context_mut();
        let thread = this.active_thread();
        let mut rwlock = rwlock_ref.0.borrow_mut();
        rwlock.writer_queue.push_back(thread);
        assert!(rwlock.is_locked(), "write-queueing on unlocked rwlock");
        drop(rwlock);
        this.block_thread(
            BlockReason::RwLock,
            None,
            callback!(
                @capture<'tcx> {
                    rwlock_ref: RwLockRef,
                    retval: Scalar,
                    dest: MPlaceTy<'tcx>,
                }
                |this, unblock: UnblockKind| {
                    assert_eq!(unblock, UnblockKind::Ready);
                    this.rwlock_writer_lock(&rwlock_ref);
                    this.write_scalar(retval, &dest)?;
                    interp_ok(())
                }
            ),
        );
    }

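    /// Release the mutex and let the current thread wait on the given condition
    /// variable. Once it is signaled, the mutex is reacquired and `retval_succ`
    /// is written to `dest`. If the timeout happens first, `retval_timeout` is
    /// written to `dest` instead (the mutex is still reacquired).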
    fn condvar_wait(
        &mut self,
        condvar_ref: CondvarRef,
        mutex_ref: MutexRef,
        timeout: Option<(TimeoutClock, TimeoutAnchor, Duration)>,
        retval_succ: Scalar,
        retval_timeout: Scalar,
        dest: MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        if let Some(old_locked_count) = this.mutex_unlock(&mutex_ref)? {
            if old_locked_count != 1 {
                throw_unsup_format!(
                    "awaiting a condvar on a mutex acquired multiple times is not supported"
                );
            }
        } else {
            throw_ub_format!(
                "awaiting a condvar on a mutex that is unlocked or owned by a different thread"
            );
        }
        let thread = this.active_thread();

        condvar_ref.0.borrow_mut().waiters.push_back(thread);
        this.block_thread(
            BlockReason::Condvar,
            timeout,
            callback!(
                @capture<'tcx> {
                    condvar_ref: CondvarRef,
                    mutex_ref: MutexRef,
                    retval_succ: Scalar,
                    retval_timeout: Scalar,
                    dest: MPlaceTy<'tcx>,
                }
                |this, unblock: UnblockKind| {
                    match unblock {
                        UnblockKind::Ready => {
                            // The condvar was signaled. Make sure we get the clock for that.
                            if let Some(data_race) = this.machine.data_race.as_vclocks_ref() {
                                data_race.acquire_clock(
                                    &condvar_ref.0.borrow().clock,
                                    &this.machine.threads,
                                );
                            }
                            // Try to acquire the mutex.
                            // The timeout only applies to the wait, not to reacquiring the mutex.
                            this.condvar_reacquire_mutex(mutex_ref, retval_succ, dest)
                        }
                        UnblockKind::TimedOut => {
                            // We have to remove the waiter from the queue again.
                            let thread = this.active_thread();
                            let waiters = &mut condvar_ref.0.borrow_mut().waiters;
                            waiters.retain(|waiter| *waiter != thread);
                            // Now get back the lock.
                            this.condvar_reacquire_mutex(mutex_ref, retval_timeout, dest)
                        }
                    }
                }
            ),
        );
        interp_ok(())
    }

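    /// Wake up some thread (if there is any) sleeping on the condition
    /// variable. Returns `true` iff any thread was woken up.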
    fn condvar_signal(&mut self, condvar_ref: &CondvarRef) -> InterpResult<'tcx, bool> {
        let this = self.eval_context_mut();
        let mut condvar = condvar_ref.0.borrow_mut();

        // Each condvar signal happens-before the end of the corresponding wake.
        if let Some(data_race) = this.machine.data_race.as_vclocks_ref() {
            data_race.release_clock(&this.machine.threads, |clock| condvar.clock.clone_from(clock));
        }
        let Some(waiter) = condvar.waiters.pop_front() else {
            return interp_ok(false);
        };
        drop(condvar);
        this.unblock_thread(waiter, BlockReason::Condvar)?;
        interp_ok(true)
    }

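    /// Wait for the futex to be signaled, or a timeout. On a signal, the futex
    /// clock is acquired before `callback` runs; `bitset` determines which wake
    /// operations apply to this waiter.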
    fn futex_wait(
        &mut self,
        futex_ref: FutexRef,
        bitset: u32,
        timeout: Option<(TimeoutClock, TimeoutAnchor, Duration)>,
        callback: DynUnblockCallback<'tcx>,
    ) {
        let this = self.eval_context_mut();
        let thread = this.active_thread();
        let mut futex = futex_ref.0.borrow_mut();
        let waiters = &mut futex.waiters;
        assert!(waiters.iter().all(|waiter| waiter.thread != thread), "thread is already waiting");
        waiters.push(FutexWaiter { thread, bitset });
        drop(futex);

        this.block_thread(
            BlockReason::Futex,
            timeout,
            callback!(
                @capture<'tcx> {
                    futex_ref: FutexRef,
                    callback: DynUnblockCallback<'tcx>,
                }
                |this, unblock: UnblockKind| {
                    match unblock {
                        UnblockKind::Ready => {
                            let futex = futex_ref.0.borrow();
                            // Acquire the clock of the futex.
                            if let Some(data_race) = this.machine.data_race.as_vclocks_ref() {
                                data_race.acquire_clock(&futex.clock, &this.machine.threads);
                            }
                        },
                        UnblockKind::TimedOut => {
                            // Remove the waiter from the futex.
                            let thread = this.active_thread();
                            let mut futex = futex_ref.0.borrow_mut();
                            futex.waiters.retain(|waiter| waiter.thread != thread);
                        },
                    }

                    callback.call(this, unblock)
                }
            ),
        );
    }

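    /// Wake up `count` of the threads in the queue whose bitset overlaps with
    /// the given `bitset`. Returns the number of threads woken.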
    fn futex_wake(
        &mut self,
        futex_ref: &FutexRef,
        bitset: u32,
        count: usize,
    ) -> InterpResult<'tcx, usize> {
        let this = self.eval_context_mut();
        let mut futex = futex_ref.0.borrow_mut();

        // Each futex-wake happens-before the end of the corresponding futex-wait.
        if let Some(data_race) = this.machine.data_race.as_vclocks_ref() {
            data_race.release_clock(&this.machine.threads, |clock| futex.clock.clone_from(clock));
        }

        // Remove up to `count` waiters that match the bitset. Collect them all
        // first so that the `RefCell` borrow is dropped before unblocking.
        let waiters: Vec<_> =
            futex.waiters.extract_if(.., |w| w.bitset & bitset != 0).take(count).collect();
        drop(futex);

        let woken = waiters.len();
        for waiter in waiters {
            this.unblock_thread(waiter.thread, BlockReason::Futex)?;
        }

        interp_ok(woken)
    }
}