miri/concurrency/sync.rs

use std::any::Any;
use std::cell::RefCell;
use std::collections::VecDeque;
use std::collections::hash_map::Entry;
use std::default::Default;
use std::ops::Not;
use std::rc::Rc;
use std::time::Duration;
use std::{fmt, iter};

use rustc_abi::Size;
use rustc_data_structures::fx::FxHashMap;

use super::vector_clock::VClock;
use crate::*;

/// Indicates which kind of access is being performed.
#[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)]
pub enum AccessKind {
    Read,
    Write,
    Dealloc,
}

impl fmt::Display for AccessKind {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            AccessKind::Read => write!(f, "read"),
            AccessKind::Write => write!(f, "write"),
            AccessKind::Dealloc => write!(f, "deallocation"),
        }
    }
}

/// A trait for the synchronization metadata that can be attached to a memory location.
pub trait SyncObj: Any {
    /// Determines whether accesses (reads, writes, deallocations) to this object's location
    /// are currently permitted.
    fn on_access<'tcx>(&self, _access_kind: AccessKind) -> InterpResult<'tcx> {
        interp_ok(())
    }

    /// Determines whether this object's metadata shall be deleted when a write to its
    /// location occurs.
    fn delete_on_write(&self) -> bool {
        false
    }
}
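
// A hedged, illustrative sketch (not a real Miri type): how a `SyncObj` impl
// might forbid certain accesses and opt into deletion on writes. `ArmedFlag`
// and its semantics are invented purely for illustration.
//
//     struct ArmedFlag { armed: bool }
//
//     impl SyncObj for ArmedFlag {
//         fn on_access<'tcx>(&self, access_kind: AccessKind) -> InterpResult<'tcx> {
//             if self.armed && access_kind == AccessKind::Dealloc {
//                 throw_ub_format!("deallocating memory with an armed `ArmedFlag`");
//             }
//             interp_ok(())
//         }
//
//         fn delete_on_write(&self) -> bool { true }
//     }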

impl dyn SyncObj {
    #[inline(always)]
    pub fn downcast_ref<T: Any>(&self) -> Option<&T> {
        let x: &dyn Any = self;
        x.downcast_ref()
    }
}

impl fmt::Debug for dyn SyncObj {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("SyncObj").finish_non_exhaustive()
    }
}

/// The mutex state.
#[derive(Default, Debug)]
struct Mutex {
    /// The thread that currently owns the lock.
    owner: Option<ThreadId>,
    /// How many times the mutex was locked by the owner.
    lock_count: usize,
    /// The queue of threads waiting for this mutex.
    queue: VecDeque<ThreadId>,
    /// Mutex clock. This tracks the moment of the last unlock.
    clock: VClock,
}

#[derive(Default, Clone, Debug)]
pub struct MutexRef(Rc<RefCell<Mutex>>);

impl MutexRef {
    pub fn new() -> Self {
        Self(Default::default())
    }

    /// Get the id of the thread that currently owns this lock, or `None` if it is not locked.
    pub fn owner(&self) -> Option<ThreadId> {
        self.0.borrow().owner
    }

    pub fn queue_is_empty(&self) -> bool {
        self.0.borrow().queue.is_empty()
    }
}

impl VisitProvenance for MutexRef {
    // Mutex contains no provenance.
    fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {}
}

/// The read-write lock state.
#[derive(Default, Debug)]
struct RwLock {
    /// The writer thread that currently owns the lock.
    writer: Option<ThreadId>,
    /// The readers that currently own the lock and how many times they acquired
    /// the lock.
    readers: FxHashMap<ThreadId, usize>,
    /// The queue of writer threads waiting for this lock.
    writer_queue: VecDeque<ThreadId>,
    /// The queue of reader threads waiting for this lock.
    reader_queue: VecDeque<ThreadId>,
    /// Data race clock for writers. Tracks the happens-before
    /// ordering between each write access to a rwlock and is updated
    /// after a sequence of concurrent readers to track the happens-
    /// before ordering between the set of previous readers and
    /// the current writer.
    /// Contains the clock of the last thread to release a writer
    /// lock or the joined clock of the set of last threads to release
    /// shared reader locks.
    clock_unlocked: VClock,
    /// Data race clock for readers. This is temporary storage
    /// for the combined happens-before ordering between all
    /// concurrent readers and the next writer, and the value
    /// is stored to the main data_race variable once all
    /// readers are finished.
    /// Has to be stored separately since reader lock acquires
    /// must load the clock of the last write and must not
    /// add happens-before orderings between shared reader
    /// locks.
    /// This is only relevant when there is an active reader.
    clock_current_readers: VClock,
}
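
// Illustrative example of the two clocks at work: suppose readers R1 and R2
// hold the lock and a writer W is queued. When each reader unlocks, its
// release clock is joined into `clock_current_readers` (see
// `rwlock_reader_unlock` below). Once the last reader is gone, that joined
// clock becomes `clock_unlocked`, so W's acquire synchronizes with *both*
// readers, while R1 and R2 never synchronize with each other.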

impl RwLock {
    /// Check if locked.
    #[inline]
    fn is_locked(&self) -> bool {
        trace!(
            "rwlock_is_locked: writer is {:?} and there are {} reader threads (some of which could hold multiple read locks)",
            self.writer,
            self.readers.len(),
        );
        self.writer.is_some() || self.readers.is_empty().not()
    }

    /// Check if write locked.
    #[inline]
    fn is_write_locked(&self) -> bool {
        trace!("rwlock_is_write_locked: writer is {:?}", self.writer);
        self.writer.is_some()
    }
}

#[derive(Default, Clone, Debug)]
pub struct RwLockRef(Rc<RefCell<RwLock>>);

impl RwLockRef {
    pub fn new() -> Self {
        Self(Default::default())
    }

    pub fn is_locked(&self) -> bool {
        self.0.borrow().is_locked()
    }

    pub fn is_write_locked(&self) -> bool {
        self.0.borrow().is_write_locked()
    }

    pub fn queue_is_empty(&self) -> bool {
        let inner = self.0.borrow();
        inner.reader_queue.is_empty() && inner.writer_queue.is_empty()
    }
}

impl VisitProvenance for RwLockRef {
    // RwLock contains no provenance.
    fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {}
}

/// The condition variable state.
#[derive(Default, Debug)]
struct Condvar {
    waiters: VecDeque<ThreadId>,
    /// Tracks the happens-before relationship
    /// between a cond-var signal and a cond-var
    /// wait during a non-spurious signal event.
    /// Contains the clock of the last thread to
    /// perform a condvar-signal.
    clock: VClock,
}

#[derive(Default, Clone, Debug)]
pub struct CondvarRef(Rc<RefCell<Condvar>>);

impl CondvarRef {
    pub fn new() -> Self {
        Self(Default::default())
    }

    pub fn queue_is_empty(&self) -> bool {
        self.0.borrow().waiters.is_empty()
    }
}

impl VisitProvenance for CondvarRef {
    // Condvar contains no provenance.
    fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {}
}

/// The futex state.
#[derive(Default, Debug)]
struct Futex {
    waiters: Vec<FutexWaiter>,
    /// Tracks the happens-before relationship
    /// between a futex-wake and a futex-wait
    /// during a non-spurious wake event.
    /// Contains the clock of the last thread to
    /// perform a futex-wake.
    clock: VClock,
}

#[derive(Default, Clone, Debug)]
pub struct FutexRef(Rc<RefCell<Futex>>);

impl FutexRef {
    pub fn new() -> Self {
        Self(Default::default())
    }

    pub fn waiters(&self) -> usize {
        self.0.borrow().waiters.len()
    }
}

impl VisitProvenance for FutexRef {
    // Futex contains no provenance.
    fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {}
}

/// A thread waiting on a futex.
#[derive(Debug)]
struct FutexWaiter {
    /// The thread that is waiting on this futex.
    thread: ThreadId,
    /// The bitset used by FUTEX_*_BITSET, or u32::MAX for other operations.
    bitset: u32,
}
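
// Matching rule used by `futex_wake` below: a waiter is eligible to be woken
// iff `waiter.bitset & wake_bitset != 0`. Plain (non-bitset) waits and wakes
// use `u32::MAX`, so every plain wake matches every waiter.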

// Private extension trait for local helper methods
impl<'tcx> EvalContextExtPriv<'tcx> for crate::MiriInterpCx<'tcx> {}
pub(super) trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
    fn condvar_reacquire_mutex(
        &mut self,
        mutex_ref: MutexRef,
        retval: Scalar,
        dest: MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        if let Some(owner) = mutex_ref.owner() {
            assert_ne!(owner, this.active_thread());
            this.mutex_enqueue_and_block(mutex_ref, Some((retval, dest)));
        } else {
            // We can have it right now!
            this.mutex_lock(&mutex_ref)?;
            // Don't forget to write the return value.
            this.write_scalar(retval, &dest)?;
        }
        interp_ok(())
    }
}

impl<'tcx> AllocExtra<'tcx> {
    fn get_sync<T: 'static>(&self, offset: Size) -> Option<&T> {
        self.sync_objs.get(&offset).and_then(|s| s.downcast_ref::<T>())
    }
}

// Public interface to synchronization objects. Please note that in most
// cases, the function calls are infallible and it is the client's (shim
// implementation's) responsibility to detect and deal with erroneous
// situations.
impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
    /// Get the synchronization object associated with the given pointer,
    /// or initialize a new one.
    ///
    /// Return `None` if this pointer does not point to at least 1 byte of mutable memory.
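    ///
    /// A hedged usage sketch; `FutexMeta` stands in for a real `SyncObj`
    /// implementation and is invented for illustration:
    ///
    /// ```ignore
    /// if let Some(meta) = this.get_sync_or_init(ptr, |_machine| FutexMeta::default()) {
    ///     // use `meta`...
    /// }
    /// ```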
    fn get_sync_or_init<'a, T: SyncObj>(
        &'a mut self,
        ptr: Pointer,
        new: impl FnOnce(&'a mut MiriMachine<'tcx>) -> T,
    ) -> Option<&'a T>
    where
        'tcx: 'a,
    {
        let this = self.eval_context_mut();
        if !this.ptr_try_get_alloc_id(ptr, 0).ok().is_some_and(|(alloc_id, offset, ..)| {
            let info = this.get_alloc_info(alloc_id);
            info.kind == AllocKind::LiveData && info.mutbl.is_mut() && offset < info.size
        }) {
            return None;
        }
        // This cannot fail now.
        let (alloc, offset, _) = this.ptr_get_alloc_id(ptr, 0).unwrap();
        let (alloc_extra, machine) = this.get_alloc_extra_mut(alloc).unwrap();
        // Due to borrow checker reasons, we have to do the lookup twice.
        if alloc_extra.get_sync::<T>(offset).is_none() {
            let new = new(machine);
            alloc_extra.sync_objs.insert(offset, Box::new(new));
        }
        Some(alloc_extra.get_sync::<T>(offset).unwrap())
    }

    /// Helper for "immovable" synchronization objects: the expected protocol for these objects is
    /// that they use a static initializer of `uninit_val`, and we set them to `init_val` upon
    /// initialization. At that point we also register a synchronization object, which is expected
    /// to have `delete_on_write() == true`. So in the future, if we still see the object, we know
    /// the location must still contain `init_val`. If the object is copied somewhere, that will
    /// show up as a non-`uninit_val` value without a synchronization object, which we can then
    /// use to raise an error.
    ///
    /// `new_meta_obj` gets invoked when there is not yet an initialization object.
    /// It has to ensure that the in-memory representation indeed matches `uninit_val`.
    ///
    /// The point of storing an `init_val` is so that if this memory gets copied somewhere else,
    /// it does not look like the static initializer (i.e., `uninit_val`) any more. For some
    /// objects we could just entirely forbid reading their bytes to ensure they don't get copied,
    /// but that does not work for objects without a destructor (Windows `InitOnce`, macOS
    /// `os_unfair_lock`).
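    ///
    /// A hedged sketch of the expected call shape; the place, offset, flag
    /// values, and `MyLockMeta` are illustrative, not taken from a real shim:
    ///
    /// ```ignore
    /// let meta = this.get_immovable_sync_with_static_init(
    ///     &lock_place,
    ///     /* init_offset */ Size::ZERO,
    ///     /* uninit_val */ 0,
    ///     /* init_val */ 1,
    ///     |_ecx| interp_ok(MyLockMeta::default()),
    /// )?;
    /// ```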
    fn get_immovable_sync_with_static_init<'a, T: SyncObj>(
        &'a mut self,
        obj: &MPlaceTy<'tcx>,
        init_offset: Size,
        uninit_val: u8,
        init_val: u8,
        new_meta_obj: impl FnOnce(&mut MiriInterpCx<'tcx>) -> InterpResult<'tcx, T>,
    ) -> InterpResult<'tcx, &'a T>
    where
        'tcx: 'a,
    {
        assert_ne!(init_val, uninit_val);
        let this = self.eval_context_mut();
        this.check_ptr_access(obj.ptr(), obj.layout.size, CheckInAllocMsg::Dereferenceable)?;
        assert!(init_offset < obj.layout.size); // ensure our 1-byte flag fits
        let init_field = obj.offset(init_offset, this.machine.layouts.u8, this)?;

        let (alloc, offset, _) = this.ptr_get_alloc_id(init_field.ptr(), 0)?;
        let (alloc_extra, _machine) = this.get_alloc_extra_mut(alloc)?;
        // Due to borrow checker reasons, we have to do the lookup twice.
        if alloc_extra.get_sync::<T>(offset).is_some() {
            let (alloc_extra, _machine) = this.get_alloc_extra_mut(alloc).unwrap();
            return interp_ok(alloc_extra.get_sync::<T>(offset).unwrap());
        }

        // There's no sync object there yet. Create one, and try a CAS for uninit_val to init_val.
        let meta_obj = new_meta_obj(this)?;
        let (old_init, success) = this
            .atomic_compare_exchange_scalar(
                &init_field,
                &ImmTy::from_scalar(Scalar::from_u8(uninit_val), this.machine.layouts.u8),
                Scalar::from_u8(init_val),
                AtomicRwOrd::Relaxed,
                AtomicReadOrd::Relaxed,
                /* can_fail_spuriously */ false,
            )?
            .to_scalar_pair();
        if !success.to_bool()? {
            // This can happen for the macOS lock if it is already marked as initialized.
            assert_eq!(
                old_init.to_u8()?,
                init_val,
                "`new_meta_obj` should have ensured that this CAS succeeds"
            );
        }

        let (alloc_extra, _machine) = this.get_alloc_extra_mut(alloc).unwrap();
        assert!(meta_obj.delete_on_write());
        alloc_extra.sync_objs.insert(offset, Box::new(meta_obj));
        interp_ok(alloc_extra.get_sync::<T>(offset).unwrap())
    }

    /// Explicitly initializes an object that would usually be implicitly initialized with
    /// `get_immovable_sync_with_static_init`.
    fn init_immovable_sync<'a, T: SyncObj>(
        &'a mut self,
        obj: &MPlaceTy<'tcx>,
        init_offset: Size,
        init_val: u8,
        new_meta_obj: T,
    ) -> InterpResult<'tcx, Option<&'a T>>
    where
        'tcx: 'a,
    {
        let this = self.eval_context_mut();
        this.check_ptr_access(obj.ptr(), obj.layout.size, CheckInAllocMsg::Dereferenceable)?;
        assert!(init_offset < obj.layout.size); // ensure our 1-byte flag fits
        let init_field = obj.offset(init_offset, this.machine.layouts.u8, this)?;

        // Zero the entire object, and then store `init_val` directly.
        this.write_bytes_ptr(obj.ptr(), iter::repeat_n(0, obj.layout.size.bytes_usize()))?;
        this.write_scalar(Scalar::from_u8(init_val), &init_field)?;

        // Create meta-level initialization object.
        let (alloc, offset, _) = this.ptr_get_alloc_id(init_field.ptr(), 0)?;
        let (alloc_extra, _machine) = this.get_alloc_extra_mut(alloc).unwrap();
        assert!(new_meta_obj.delete_on_write());
        alloc_extra.sync_objs.insert(offset, Box::new(new_meta_obj));
        interp_ok(Some(alloc_extra.get_sync::<T>(offset).unwrap()))
    }

    /// Lock by setting the mutex owner and increasing the lock count.
    fn mutex_lock(&mut self, mutex_ref: &MutexRef) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let thread = this.active_thread();
        let mut mutex = mutex_ref.0.borrow_mut();
        if let Some(current_owner) = mutex.owner {
            assert_eq!(thread, current_owner, "mutex already locked by another thread");
            assert!(
                mutex.lock_count > 0,
                "invariant violation: lock_count == 0 iff the mutex is unlocked"
            );
        } else {
            mutex.owner = Some(thread);
        }
        mutex.lock_count = mutex.lock_count.strict_add(1);
        this.acquire_clock(&mutex.clock)?;
        interp_ok(())
    }

    /// Try unlocking by decreasing the lock count and returning the old lock
    /// count. If the lock count reaches 0, release the lock and potentially
    /// give it to a new owner. If the lock was not locked by the current thread,
    /// return `None`.
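    ///
    /// A hedged sketch of typical shim-side handling of the return value (the
    /// success value and error message are illustrative):
    ///
    /// ```ignore
    /// match this.mutex_unlock(&mutex_ref)? {
    ///     Some(_old_count) => this.write_scalar(Scalar::from_i32(0), &dest)?,
    ///     None => throw_ub_format!("unlocking a mutex not locked by the current thread"),
    /// }
    /// ```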
    fn mutex_unlock(&mut self, mutex_ref: &MutexRef) -> InterpResult<'tcx, Option<usize>> {
        let this = self.eval_context_mut();
        let mut mutex = mutex_ref.0.borrow_mut();
        interp_ok(if let Some(current_owner) = mutex.owner {
            // Mutex is locked.
            if current_owner != this.machine.threads.active_thread() {
                // Only the owner can unlock the mutex.
                return interp_ok(None);
            }
            let old_lock_count = mutex.lock_count;
            mutex.lock_count = old_lock_count.strict_sub(1);
            if mutex.lock_count == 0 {
                mutex.owner = None;
                // The mutex is completely unlocked. Try transferring ownership
                // to another thread.
                this.release_clock(|clock| mutex.clock.clone_from(clock))?;
                let thread_id = mutex.queue.pop_front();
                // We need to drop our mutex borrow before unblock_thread
                // because it will be borrowed again in the unblock callback.
                drop(mutex);
                if let Some(thread_id) = thread_id {
                    this.unblock_thread(thread_id, BlockReason::Mutex)?;
                }
            }
            Some(old_lock_count)
        } else {
            // Mutex is not locked.
            None
        })
    }

    /// Put the thread into the queue waiting for the mutex.
    ///
    /// Once the mutex becomes available, `retval_dest.0` will be written to
    /// `retval_dest.1`, if a destination was provided.
    #[inline]
    fn mutex_enqueue_and_block(
        &mut self,
        mutex_ref: MutexRef,
        retval_dest: Option<(Scalar, MPlaceTy<'tcx>)>,
    ) {
        let this = self.eval_context_mut();
        let thread = this.active_thread();
        let mut mutex = mutex_ref.0.borrow_mut();
        mutex.queue.push_back(thread);
        assert!(mutex.owner.is_some(), "queuing on unlocked mutex");
        drop(mutex);
        this.block_thread(
            BlockReason::Mutex,
            None,
            callback!(
                @capture<'tcx> {
                    mutex_ref: MutexRef,
                    retval_dest: Option<(Scalar, MPlaceTy<'tcx>)>,
                }
                |this, unblock: UnblockKind| {
                    assert_eq!(unblock, UnblockKind::Ready);

                    assert!(mutex_ref.owner().is_none());
                    this.mutex_lock(&mutex_ref)?;

                    if let Some((retval, dest)) = retval_dest {
                        this.write_scalar(retval, &dest)?;
                    }

                    interp_ok(())
                }
            ),
        );
    }

    /// Read-lock the lock by adding the current thread to the list of threads
    /// that own this lock.
    fn rwlock_reader_lock(&mut self, rwlock_ref: &RwLockRef) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let thread = this.active_thread();
        trace!("rwlock_reader_lock: now also held (one more time) by {:?}", thread);
        let mut rwlock = rwlock_ref.0.borrow_mut();
        assert!(!rwlock.is_write_locked(), "the lock is write locked");
        let count = rwlock.readers.entry(thread).or_insert(0);
        *count = count.strict_add(1);
        this.acquire_clock(&rwlock.clock_unlocked)?;
        interp_ok(())
    }

    /// Try read-unlocking the lock for the current thread and potentially give the lock to a new owner.
    /// Returns `true` if that succeeded, `false` if this thread did not hold the lock as a reader.
    fn rwlock_reader_unlock(&mut self, rwlock_ref: &RwLockRef) -> InterpResult<'tcx, bool> {
        let this = self.eval_context_mut();
        let thread = this.active_thread();
        let mut rwlock = rwlock_ref.0.borrow_mut();
        match rwlock.readers.entry(thread) {
            Entry::Occupied(mut entry) => {
                let count = entry.get_mut();
                assert!(*count > 0, "rwlock locked with count == 0");
                *count -= 1;
                if *count == 0 {
                    trace!("rwlock_reader_unlock: no longer held by {:?}", thread);
                    entry.remove();
                } else {
                    trace!("rwlock_reader_unlock: held one less time by {:?}", thread);
                }
            }
            Entry::Vacant(_) => return interp_ok(false), // we did not even own this lock
        }
        // Add this to the shared-release clock of all concurrent readers.
        this.release_clock(|clock| rwlock.clock_current_readers.join(clock))?;

        // The thread was a reader. If the lock is not held any more, give it to a writer.
        if rwlock.is_locked().not() {
            // All the readers are finished, so set the writer data-race handle to the value
            // of the union of all reader data race handles, since the set of readers
            // happen-before the writers.
            let rwlock_ref = &mut *rwlock;
            rwlock_ref.clock_unlocked.clone_from(&rwlock_ref.clock_current_readers);
            // See if there is a thread to unblock.
            if let Some(writer) = rwlock_ref.writer_queue.pop_front() {
                drop(rwlock); // make RefCell available for unblock callback
                this.unblock_thread(writer, BlockReason::RwLock)?;
            }
        }
        interp_ok(true)
    }

    /// Put the reader in the queue waiting for the lock and block it.
    /// Once the lock becomes available, `retval` will be written to `dest`.
    #[inline]
    fn rwlock_enqueue_and_block_reader(
        &mut self,
        rwlock_ref: RwLockRef,
        retval: Scalar,
        dest: MPlaceTy<'tcx>,
    ) {
        let this = self.eval_context_mut();
        let thread = this.active_thread();
        let mut rwlock = rwlock_ref.0.borrow_mut();
        rwlock.reader_queue.push_back(thread);
        assert!(rwlock.is_write_locked(), "read-queueing on an rwlock that is not write-locked");
        drop(rwlock);
        this.block_thread(
            BlockReason::RwLock,
            None,
            callback!(
                @capture<'tcx> {
                    rwlock_ref: RwLockRef,
                    retval: Scalar,
                    dest: MPlaceTy<'tcx>,
                }
                |this, unblock: UnblockKind| {
                    assert_eq!(unblock, UnblockKind::Ready);
                    this.rwlock_reader_lock(&rwlock_ref)?;
                    this.write_scalar(retval, &dest)?;
                    interp_ok(())
                }
            ),
        );
    }

    /// Lock by setting the writer that owns the lock.
    #[inline]
    fn rwlock_writer_lock(&mut self, rwlock_ref: &RwLockRef) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let thread = this.active_thread();
        trace!("rwlock_writer_lock: now held by {:?}", thread);

        let mut rwlock = rwlock_ref.0.borrow_mut();
        assert!(!rwlock.is_locked(), "the rwlock is already locked");
        rwlock.writer = Some(thread);
        this.acquire_clock(&rwlock.clock_unlocked)?;
        interp_ok(())
    }

    /// Try to unlock an rwlock held by the current thread.
    /// Return `false` if it is held by another thread.
    #[inline]
    fn rwlock_writer_unlock(&mut self, rwlock_ref: &RwLockRef) -> InterpResult<'tcx, bool> {
        let this = self.eval_context_mut();
        let thread = this.active_thread();
        let mut rwlock = rwlock_ref.0.borrow_mut();
        interp_ok(if let Some(current_writer) = rwlock.writer {
            if current_writer != thread {
                // Only the owner can unlock the rwlock.
                return interp_ok(false);
            }
            rwlock.writer = None;
            trace!("rwlock_writer_unlock: unlocked by {:?}", thread);
            // Record release clock for next lock holder.
            this.release_clock(|clock| rwlock.clock_unlocked.clone_from(clock))?;

            // The thread was a writer.
            //
            // We are prioritizing writers here against the readers. As a
            // result, not only can readers starve writers, but writers can
            // also starve readers.
            if let Some(writer) = rwlock.writer_queue.pop_front() {
                drop(rwlock); // make RefCell available for unblock callback
                this.unblock_thread(writer, BlockReason::RwLock)?;
            } else {
                // Take the entire read queue and wake them all up.
                let readers = std::mem::take(&mut rwlock.reader_queue);
                drop(rwlock); // make RefCell available for unblock callback
                for reader in readers {
                    this.unblock_thread(reader, BlockReason::RwLock)?;
                }
            }
            true
        } else {
            false
        })
    }

    /// Put the writer in the queue waiting for the lock.
    /// Once the lock becomes available, `retval` will be written to `dest`.
    #[inline]
    fn rwlock_enqueue_and_block_writer(
        &mut self,
        rwlock_ref: RwLockRef,
        retval: Scalar,
        dest: MPlaceTy<'tcx>,
    ) {
        let this = self.eval_context_mut();
        let thread = this.active_thread();
        let mut rwlock = rwlock_ref.0.borrow_mut();
        rwlock.writer_queue.push_back(thread);
        assert!(rwlock.is_locked(), "write-queueing on unlocked rwlock");
        drop(rwlock);
        this.block_thread(
            BlockReason::RwLock,
            None,
            callback!(
                @capture<'tcx> {
                    rwlock_ref: RwLockRef,
                    retval: Scalar,
                    dest: MPlaceTy<'tcx>,
                }
                |this, unblock: UnblockKind| {
                    assert_eq!(unblock, UnblockKind::Ready);
                    this.rwlock_writer_lock(&rwlock_ref)?;
                    this.write_scalar(retval, &dest)?;
                    interp_ok(())
                }
            ),
        );
    }

    /// Release the mutex and let the current thread wait on the given condition variable.
    /// Once it is signaled, the mutex will be acquired and `retval_succ` will be written to `dest`.
    /// If the timeout happens first, `retval_timeout` will be written to `dest`.
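    ///
    /// A hedged usage sketch mirroring a `pthread_cond_timedwait`-style shim;
    /// the return values and timeout are illustrative:
    ///
    /// ```ignore
    /// this.condvar_wait(
    ///     condvar,
    ///     mutex,
    ///     Some((TimeoutClock::Monotonic, TimeoutAnchor::Relative, duration)),
    ///     Scalar::from_i32(0),         // written on signal
    ///     Scalar::from_i32(etimedout), // written on timeout
    ///     dest,
    /// )?;
    /// ```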
    fn condvar_wait(
        &mut self,
        condvar_ref: CondvarRef,
        mutex_ref: MutexRef,
        timeout: Option<(TimeoutClock, TimeoutAnchor, Duration)>,
        retval_succ: Scalar,
        retval_timeout: Scalar,
        dest: MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        if let Some(old_locked_count) = this.mutex_unlock(&mutex_ref)? {
            if old_locked_count != 1 {
                throw_unsup_format!(
                    "awaiting a condvar on a mutex acquired multiple times is not supported"
                );
            }
        } else {
            throw_ub_format!(
                "awaiting a condvar on a mutex that is unlocked or owned by a different thread"
            );
        }
        let thread = this.active_thread();

        condvar_ref.0.borrow_mut().waiters.push_back(thread);
        this.block_thread(
            BlockReason::Condvar,
            timeout,
            callback!(
                @capture<'tcx> {
                    condvar_ref: CondvarRef,
                    mutex_ref: MutexRef,
                    retval_succ: Scalar,
                    retval_timeout: Scalar,
                    dest: MPlaceTy<'tcx>,
                }
                |this, unblock: UnblockKind| {
                    match unblock {
                        UnblockKind::Ready => {
                            // The condvar was signaled. Make sure we get the clock for that.
                            this.acquire_clock(&condvar_ref.0.borrow().clock)?;
                            // Try to acquire the mutex.
                            // The timeout only applies to the first wait (until the signal), not for mutex acquisition.
                            this.condvar_reacquire_mutex(mutex_ref, retval_succ, dest)
                        }
                        UnblockKind::TimedOut => {
                            // We have to remove the waiter from the queue again.
                            let thread = this.active_thread();
                            let waiters = &mut condvar_ref.0.borrow_mut().waiters;
                            waiters.retain(|waiter| *waiter != thread);
                            // Now get back the lock.
                            this.condvar_reacquire_mutex(mutex_ref, retval_timeout, dest)
                        }
                    }
                }
            ),
        );
        interp_ok(())
    }

    /// Wake up some thread (if there is any) sleeping on the condition
    /// variable. Returns `true` iff any thread was woken up.
    fn condvar_signal(&mut self, condvar_ref: &CondvarRef) -> InterpResult<'tcx, bool> {
        let this = self.eval_context_mut();
        let mut condvar = condvar_ref.0.borrow_mut();

        // Each condvar signal happens-before the end of the corresponding condvar wait.
        this.release_clock(|clock| condvar.clock.clone_from(clock))?;
        let Some(waiter) = condvar.waiters.pop_front() else {
            return interp_ok(false);
        };
        drop(condvar);
        this.unblock_thread(waiter, BlockReason::Condvar)?;
        interp_ok(true)
    }

    /// Wait for the futex to be signaled, or a timeout. Once the thread is
    /// unblocked, `callback` is called with the unblock reason.
    fn futex_wait(
        &mut self,
        futex_ref: FutexRef,
        bitset: u32,
        timeout: Option<(TimeoutClock, TimeoutAnchor, Duration)>,
        callback: DynUnblockCallback<'tcx>,
    ) {
        let this = self.eval_context_mut();
        let thread = this.active_thread();
        let mut futex = futex_ref.0.borrow_mut();
        let waiters = &mut futex.waiters;
        assert!(waiters.iter().all(|waiter| waiter.thread != thread), "thread is already waiting");
        waiters.push(FutexWaiter { thread, bitset });
        drop(futex);

        this.block_thread(
            BlockReason::Futex,
            timeout,
            callback!(
                @capture<'tcx> {
                    futex_ref: FutexRef,
                    callback: DynUnblockCallback<'tcx>,
                }
                |this, unblock: UnblockKind| {
                    match unblock {
                        UnblockKind::Ready => {
                            let futex = futex_ref.0.borrow();
                            // Acquire the clock of the futex.
                            this.acquire_clock(&futex.clock)?;
                        },
                        UnblockKind::TimedOut => {
                            // Remove the waiter from the futex.
                            let thread = this.active_thread();
                            let mut futex = futex_ref.0.borrow_mut();
                            futex.waiters.retain(|waiter| waiter.thread != thread);
                        },
                    }

                    callback.call(this, unblock)
                }
            ),
        );
    }

    /// Wake up `count` of the threads in the queue that match any of the bits
    /// in the bitset. Returns how many threads were woken.
    fn futex_wake(
        &mut self,
        futex_ref: &FutexRef,
        bitset: u32,
        count: usize,
    ) -> InterpResult<'tcx, usize> {
        let this = self.eval_context_mut();
        let mut futex = futex_ref.0.borrow_mut();

        // Each futex-wake happens-before the end of the futex wait.
        this.release_clock(|clock| futex.clock.clone_from(clock))?;

        // Remove `count` of the threads in the queue that match any of the bits in the bitset.
        // We collect all of them before unblocking because the unblock callback may access the
        // futex state to retrieve the remaining number of waiters on macOS.
        let waiters: Vec<_> =
            futex.waiters.extract_if(.., |w| w.bitset & bitset != 0).take(count).collect();
        drop(futex);

        let woken = waiters.len();
        for waiter in waiters {
            this.unblock_thread(waiter.thread, BlockReason::Futex)?;
        }

        interp_ok(woken)
    }
}