miri/concurrency/sync.rs

use std::cell::RefCell;
use std::collections::VecDeque;
use std::collections::hash_map::Entry;
use std::default::Default;
use std::ops::Not;
use std::rc::Rc;
use std::time::Duration;

use rustc_abi::Size;
use rustc_data_structures::fx::FxHashMap;

use super::vector_clock::VClock;
use crate::*;

/// The mutex state.
#[derive(Default, Debug)]
struct Mutex {
    /// The thread that currently owns the lock.
    owner: Option<ThreadId>,
    /// How many times the mutex was locked by the owner.
    lock_count: usize,
    /// The queue of threads waiting for this mutex.
    queue: VecDeque<ThreadId>,
    /// Mutex clock. This tracks the moment of the last unlock.
    clock: VClock,
}

#[derive(Default, Clone, Debug)]
pub struct MutexRef(Rc<RefCell<Mutex>>);

impl MutexRef {
    pub fn new() -> Self {
        Self(Default::default())
    }

    /// Get the id of the thread that currently owns this lock, or `None` if it is not locked.
    pub fn owner(&self) -> Option<ThreadId> {
        self.0.borrow().owner
    }
}

impl VisitProvenance for MutexRef {
    // Mutex contains no provenance.
    fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {}
}

/// The read-write lock state.
#[derive(Default, Debug)]
struct RwLock {
    /// The writer thread that currently owns the lock.
    writer: Option<ThreadId>,
    /// The readers that currently own the lock and how many times they acquired
    /// the lock.
    readers: FxHashMap<ThreadId, usize>,
    /// The queue of writer threads waiting for this lock.
    writer_queue: VecDeque<ThreadId>,
    /// The queue of reader threads waiting for this lock.
    reader_queue: VecDeque<ThreadId>,
    /// Data race clock for writers. Tracks the happens-before
    /// ordering between each write access to a rwlock and is updated
    /// after a sequence of concurrent readers to track the happens-before
    /// ordering between the set of previous readers and
    /// the current writer.
    /// Contains the clock of the last thread to release a writer
    /// lock or the joined clock of the set of last threads to release
    /// shared reader locks.
    clock_unlocked: VClock,
    /// Data race clock for readers. This is temporary storage
    /// for the combined happens-before ordering between all
    /// concurrent readers and the next writer, and the value
    /// is stored to `clock_unlocked` once all
    /// readers are finished.
    /// Has to be stored separately since reader lock acquires
    /// must load the clock of the last write and must not
    /// add happens-before orderings between shared reader
    /// locks.
    /// This is only relevant when there is an active reader.
    clock_current_readers: VClock,
}

impl RwLock {
    #[inline]
    /// Check if locked.
    fn is_locked(&self) -> bool {
        trace!(
            "rwlock_is_locked: writer is {:?} and there are {} reader threads (some of which could hold multiple read locks)",
            self.writer,
            self.readers.len(),
        );
        self.writer.is_some() || self.readers.is_empty().not()
    }

    /// Check if write locked.
    #[inline]
    fn is_write_locked(&self) -> bool {
        trace!("rwlock_is_write_locked: writer is {:?}", self.writer);
        self.writer.is_some()
    }
}

#[derive(Default, Clone, Debug)]
pub struct RwLockRef(Rc<RefCell<RwLock>>);

impl RwLockRef {
    pub fn new() -> Self {
        Self(Default::default())
    }

    pub fn is_locked(&self) -> bool {
        self.0.borrow().is_locked()
    }

    pub fn is_write_locked(&self) -> bool {
        self.0.borrow().is_write_locked()
    }
}

impl VisitProvenance for RwLockRef {
    // RwLock contains no provenance.
    fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {}
}

/// The condition variable state.
#[derive(Default, Debug)]
struct Condvar {
    waiters: VecDeque<ThreadId>,
    /// Tracks the happens-before relationship
    /// between a cond-var signal and a cond-var
    /// wait during a non-spurious signal event.
    /// Contains the clock of the last thread to
    /// perform a condvar-signal.
    clock: VClock,
}

#[derive(Default, Clone, Debug)]
pub struct CondvarRef(Rc<RefCell<Condvar>>);

impl CondvarRef {
    pub fn new() -> Self {
        Self(Default::default())
    }

    pub fn is_awaited(&self) -> bool {
        !self.0.borrow().waiters.is_empty()
    }
}

impl VisitProvenance for CondvarRef {
    // Condvar contains no provenance.
    fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {}
}

/// The futex state.
#[derive(Default, Debug)]
struct Futex {
    waiters: Vec<FutexWaiter>,
    /// Tracks the happens-before relationship
    /// between a futex-wake and a futex-wait
    /// during a non-spurious wake event.
    /// Contains the clock of the last thread to
    /// perform a futex-wake.
    clock: VClock,
}

#[derive(Default, Clone, Debug)]
pub struct FutexRef(Rc<RefCell<Futex>>);

impl FutexRef {
    pub fn new() -> Self {
        Self(Default::default())
    }

    pub fn waiters(&self) -> usize {
        self.0.borrow().waiters.len()
    }
}

impl VisitProvenance for FutexRef {
    // Futex contains no provenance.
    fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {}
}

/// A thread waiting on a futex.
#[derive(Debug)]
struct FutexWaiter {
    /// The thread that is waiting on this futex.
    thread: ThreadId,
    /// The bitset used by FUTEX_*_BITSET, or u32::MAX for other operations.
    bitset: u32,
}

// Private extension trait for local helper methods.
impl<'tcx> EvalContextExtPriv<'tcx> for crate::MiriInterpCx<'tcx> {}
pub(super) trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
    fn condvar_reacquire_mutex(
        &mut self,
        mutex_ref: MutexRef,
        retval: Scalar,
        dest: MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        if let Some(owner) = mutex_ref.owner() {
            assert_ne!(owner, this.active_thread());
            this.mutex_enqueue_and_block(mutex_ref, Some((retval, dest)));
        } else {
            // We can have it right now!
            this.mutex_lock(&mutex_ref);
            // Don't forget to write the return value.
            this.write_scalar(retval, &dest)?;
        }
        interp_ok(())
    }
}

impl<'tcx> AllocExtra<'tcx> {
    fn get_sync<T: 'static>(&self, offset: Size) -> Option<&T> {
        self.sync.get(&offset).and_then(|s| s.downcast_ref::<T>())
    }
}

/// We designate an `init` field in all primitives.
/// If `init` is set to this, we consider the primitive initialized.
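/// How the cookie is checked is shown by `lazy_sync_get_data` below; as a rough,
/// purely illustrative analogy in plain Rust (not what Miri actually executes, and
/// `is_initialized` is a hypothetical name):
/// ```ignore
/// use std::sync::atomic::{AtomicU32, Ordering};
///
/// fn is_initialized(init_field: &AtomicU32) -> bool {
///     // An RMW (not a plain load) so that the latest value is observed:
///     // we "replace" the cookie with itself.
///     init_field
///         .compare_exchange(LAZY_INIT_COOKIE, LAZY_INIT_COOKIE, Ordering::Relaxed, Ordering::Relaxed)
///         .is_ok()
/// }
/// ```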
pub const LAZY_INIT_COOKIE: u32 = 0xcafe_affe;

// Public interface to synchronization primitives. Please note that in most
// cases, the function calls are infallible and it is the client's (shim
// implementation's) responsibility to detect and deal with erroneous
// situations.
impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
    /// Helper for lazily initialized `alloc_extra.sync` data:
    /// this forces an immediate init.
    /// Return a reference to the data in the machine state.
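    ///
    /// A minimal usage sketch (illustrative only; `mutex_place` and `init_offset` are
    /// assumed to come from the calling shim):
    /// ```ignore
    /// // Eagerly create the interpreter-side data and mark the object as initialized.
    /// this.lazy_sync_init(&mutex_place, init_offset, MutexRef::new())?;
    /// ```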
    fn lazy_sync_init<'a, T: 'static>(
        &'a mut self,
        primitive: &MPlaceTy<'tcx>,
        init_offset: Size,
        data: T,
    ) -> InterpResult<'tcx, &'a T>
    where
        'tcx: 'a,
    {
        let this = self.eval_context_mut();

        let (alloc, offset, _) = this.ptr_get_alloc_id(primitive.ptr(), 0)?;
        let (alloc_extra, _machine) = this.get_alloc_extra_mut(alloc)?;
        alloc_extra.sync.insert(offset, Box::new(data));
        // Mark this as "initialized".
        let init_field = primitive.offset(init_offset, this.machine.layouts.u32, this)?;
        this.write_scalar_atomic(
            Scalar::from_u32(LAZY_INIT_COOKIE),
            &init_field,
            AtomicWriteOrd::Relaxed,
        )?;
        interp_ok(this.get_alloc_extra(alloc)?.get_sync::<T>(offset).unwrap())
    }

    /// Helper for lazily initialized `alloc_extra.sync` data:
    /// Checks if the primitive is initialized:
    /// - If yes, fetches the data from `alloc_extra.sync`, or calls `missing_data` if that fails
    ///   and stores that in `alloc_extra.sync`.
    /// - Otherwise, calls `new_data` to initialize the primitive.
    ///
    /// Return a reference to the data in the machine state.
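    ///
    /// A minimal usage sketch (illustrative only; `mutex_place` and `init_offset` are
    /// assumed to come from the calling shim, and the error message is made up):
    /// ```ignore
    /// let mutex_ref = this.lazy_sync_get_data::<MutexRef>(
    ///     &mutex_place,
    ///     init_offset,
    ///     || throw_ub_format!("sync primitive was moved after initialization"),
    ///     |_ecx| interp_ok(MutexRef::new()),
    /// )?;
    /// ```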
    fn lazy_sync_get_data<'a, T: 'static>(
        &'a mut self,
        primitive: &MPlaceTy<'tcx>,
        init_offset: Size,
        missing_data: impl FnOnce() -> InterpResult<'tcx, T>,
        new_data: impl FnOnce(&mut MiriInterpCx<'tcx>) -> InterpResult<'tcx, T>,
    ) -> InterpResult<'tcx, &'a T>
    where
        'tcx: 'a,
    {
        let this = self.eval_context_mut();

        // Check if this is already initialized. Needs to be atomic because we can race with another
        // thread initializing. Needs to be an RMW operation to ensure we read the *latest* value.
        // So we just try to replace LAZY_INIT_COOKIE with itself.
        let init_cookie = Scalar::from_u32(LAZY_INIT_COOKIE);
        let init_field = primitive.offset(init_offset, this.machine.layouts.u32, this)?;
        let (_init, success) = this
            .atomic_compare_exchange_scalar(
                &init_field,
                &ImmTy::from_scalar(init_cookie, this.machine.layouts.u32),
                init_cookie,
                AtomicRwOrd::Relaxed,
                AtomicReadOrd::Relaxed,
                /* can_fail_spuriously */ false,
            )?
            .to_scalar_pair();

        if success.to_bool()? {
            // If it is initialized, it must be found in the "sync primitive" table,
            // or else it has been moved illegally.
            let (alloc, offset, _) = this.ptr_get_alloc_id(primitive.ptr(), 0)?;
            let (alloc_extra, _machine) = this.get_alloc_extra_mut(alloc)?;
            // Due to borrow checker reasons, we have to do the lookup twice.
            if alloc_extra.get_sync::<T>(offset).is_none() {
                let data = missing_data()?;
                alloc_extra.sync.insert(offset, Box::new(data));
            }
            interp_ok(alloc_extra.get_sync::<T>(offset).unwrap())
        } else {
            let data = new_data(this)?;
            this.lazy_sync_init(primitive, init_offset, data)
        }
    }

    /// Get the synchronization primitive associated with the given pointer,
    /// or initialize a new one.
    ///
    /// Return `None` if this pointer does not point to at least 1 byte of mutable memory.
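    ///
    /// A minimal usage sketch (illustrative only; the futex-style lookup keyed by the
    /// address of the synchronization word is an assumption about the caller):
    /// ```ignore
    /// let Some(futex_ref) = this.get_sync_or_init(ptr, |_machine| FutexRef::new()) else {
    ///     throw_ub_format!("invalid pointer passed to a futex-like operation");
    /// };
    /// let futex_ref = futex_ref.clone();
    /// ```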
    fn get_sync_or_init<'a, T: 'static>(
        &'a mut self,
        ptr: Pointer,
        new: impl FnOnce(&'a mut MiriMachine<'tcx>) -> T,
    ) -> Option<&'a T>
    where
        'tcx: 'a,
    {
        let this = self.eval_context_mut();
        if !this.ptr_try_get_alloc_id(ptr, 0).ok().is_some_and(|(alloc_id, offset, ..)| {
            let info = this.get_alloc_info(alloc_id);
            info.kind == AllocKind::LiveData && info.mutbl.is_mut() && offset < info.size
        }) {
            return None;
        }
        // This cannot fail now.
        let (alloc, offset, _) = this.ptr_get_alloc_id(ptr, 0).unwrap();
        let (alloc_extra, machine) = this.get_alloc_extra_mut(alloc).unwrap();
        // Due to borrow checker reasons, we have to do the lookup twice.
        if alloc_extra.get_sync::<T>(offset).is_none() {
            let new = new(machine);
            alloc_extra.sync.insert(offset, Box::new(new));
        }
        Some(alloc_extra.get_sync::<T>(offset).unwrap())
    }

    /// Lock by setting the mutex owner and increasing the lock count.
    fn mutex_lock(&mut self, mutex_ref: &MutexRef) {
        let this = self.eval_context_mut();
        let thread = this.active_thread();
        let mut mutex = mutex_ref.0.borrow_mut();
        if let Some(current_owner) = mutex.owner {
            assert_eq!(thread, current_owner, "mutex already locked by another thread");
            assert!(
                mutex.lock_count > 0,
                "invariant violation: lock_count == 0 iff the mutex is unlocked"
            );
        } else {
            mutex.owner = Some(thread);
        }
        mutex.lock_count = mutex.lock_count.strict_add(1);
        if let Some(data_race) = this.machine.data_race.as_vclocks_ref() {
            data_race.acquire_clock(&mutex.clock, &this.machine.threads);
        }
    }

    /// Try unlocking by decreasing the lock count and returning the old lock
    /// count. If the lock count reaches 0, release the lock and potentially
    /// give it to a new owner. If the lock was not locked by the current thread,
    /// return `None`.
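    ///
    /// A minimal usage sketch (illustrative only; `libc_eperm` stands in for whatever
    /// error value the calling shim wants to report):
    /// ```ignore
    /// match this.mutex_unlock(&mutex_ref)? {
    ///     Some(_old_count) => this.write_scalar(Scalar::from_i32(0), &dest)?,
    ///     None => this.write_scalar(libc_eperm, &dest)?, // not the owner (or not locked)
    /// }
    /// ```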
    fn mutex_unlock(&mut self, mutex_ref: &MutexRef) -> InterpResult<'tcx, Option<usize>> {
        let this = self.eval_context_mut();
        let mut mutex = mutex_ref.0.borrow_mut();
        interp_ok(if let Some(current_owner) = mutex.owner {
            // Mutex is locked.
            if current_owner != this.machine.threads.active_thread() {
                // Only the owner can unlock the mutex.
                return interp_ok(None);
            }
            let old_lock_count = mutex.lock_count;
            mutex.lock_count = old_lock_count.strict_sub(1);
            if mutex.lock_count == 0 {
                mutex.owner = None;
                // The mutex is completely unlocked. Try transferring ownership
                // to another thread.

                if let Some(data_race) = this.machine.data_race.as_vclocks_ref() {
                    data_race.release_clock(&this.machine.threads, |clock| {
                        mutex.clock.clone_from(clock)
                    });
                }
                let thread_id = mutex.queue.pop_front();
                // We need to drop our mutex borrow before unblock_thread
                // because it will be borrowed again in the unblock callback.
                drop(mutex);
                if thread_id.is_some() {
                    this.unblock_thread(thread_id.unwrap(), BlockReason::Mutex)?;
                }
            }
            Some(old_lock_count)
        } else {
            // Mutex is not locked.
            None
        })
    }

    /// Put the thread into the queue waiting for the mutex.
    ///
    /// Once the mutex becomes available, and if `retval_dest` is `Some`,
    /// `retval_dest.0` will be written to `retval_dest.1`.
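    ///
    /// A minimal usage sketch for a blocking lock operation, mirroring
    /// `condvar_reacquire_mutex` above (illustrative only; the `0` return value is an assumption):
    /// ```ignore
    /// if let Some(owner) = mutex_ref.owner() {
    ///     assert_ne!(owner, this.active_thread());
    ///     this.mutex_enqueue_and_block(mutex_ref, Some((Scalar::from_i32(0), dest)));
    /// } else {
    ///     this.mutex_lock(&mutex_ref);
    ///     this.write_scalar(Scalar::from_i32(0), &dest)?;
    /// }
    /// ```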
    #[inline]
    fn mutex_enqueue_and_block(
        &mut self,
        mutex_ref: MutexRef,
        retval_dest: Option<(Scalar, MPlaceTy<'tcx>)>,
    ) {
        let this = self.eval_context_mut();
        let thread = this.active_thread();
        let mut mutex = mutex_ref.0.borrow_mut();
        mutex.queue.push_back(thread);
        assert!(mutex.owner.is_some(), "queuing on unlocked mutex");
        drop(mutex);
        this.block_thread(
            BlockReason::Mutex,
            None,
            callback!(
                @capture<'tcx> {
                    mutex_ref: MutexRef,
                    retval_dest: Option<(Scalar, MPlaceTy<'tcx>)>,
                }
                |this, unblock: UnblockKind| {
                    assert_eq!(unblock, UnblockKind::Ready);

                    assert!(mutex_ref.owner().is_none());
                    this.mutex_lock(&mutex_ref);

                    if let Some((retval, dest)) = retval_dest {
                        this.write_scalar(retval, &dest)?;
                    }

                    interp_ok(())
                }
            ),
        );
    }

    /// Read-lock the lock by adding the current thread to the list of threads that own
    /// this lock.
    fn rwlock_reader_lock(&mut self, rwlock_ref: &RwLockRef) {
        let this = self.eval_context_mut();
        let thread = this.active_thread();
        trace!("rwlock_reader_lock: now also held (one more time) by {:?}", thread);
        let mut rwlock = rwlock_ref.0.borrow_mut();
        assert!(!rwlock.is_write_locked(), "the lock is write locked");
        let count = rwlock.readers.entry(thread).or_insert(0);
        *count = count.strict_add(1);
        if let Some(data_race) = this.machine.data_race.as_vclocks_ref() {
            data_race.acquire_clock(&rwlock.clock_unlocked, &this.machine.threads);
        }
    }

    /// Try to read-unlock the lock for the current thread and potentially give the lock to a new owner.
    /// Returns `true` on success, `false` if the current thread did not hold a read lock.
    fn rwlock_reader_unlock(&mut self, rwlock_ref: &RwLockRef) -> InterpResult<'tcx, bool> {
        let this = self.eval_context_mut();
        let thread = this.active_thread();
        let mut rwlock = rwlock_ref.0.borrow_mut();
        match rwlock.readers.entry(thread) {
            Entry::Occupied(mut entry) => {
                let count = entry.get_mut();
                assert!(*count > 0, "rwlock locked with count == 0");
                *count -= 1;
                if *count == 0 {
                    trace!("rwlock_reader_unlock: no longer held by {:?}", thread);
                    entry.remove();
                } else {
                    trace!("rwlock_reader_unlock: held one less time by {:?}", thread);
                }
            }
            Entry::Vacant(_) => return interp_ok(false), // we did not even own this lock
        }
        if let Some(data_race) = this.machine.data_race.as_vclocks_ref() {
            // Add this to the shared-release clock of all concurrent readers.
            data_race.release_clock(&this.machine.threads, |clock| {
                rwlock.clock_current_readers.join(clock)
            });
        }

        // The thread was a reader. If the lock is not held any more, give it to a writer.
        if rwlock.is_locked().not() {
            // All the readers are finished, so set the writer data-race handle to the
            // union of all reader data-race handles, since the set of readers
            // happens-before the next writer.
            let rwlock_ref = &mut *rwlock;
            rwlock_ref.clock_unlocked.clone_from(&rwlock_ref.clock_current_readers);
            // See if there is a thread to unblock.
            if let Some(writer) = rwlock_ref.writer_queue.pop_front() {
                drop(rwlock); // make RefCell available for unblock callback
                this.unblock_thread(writer, BlockReason::RwLock)?;
            }
        }
        interp_ok(true)
    }

    /// Put the reader in the queue waiting for the lock and block it.
    /// Once the lock becomes available, `retval` will be written to `dest`.
    #[inline]
    fn rwlock_enqueue_and_block_reader(
        &mut self,
        rwlock_ref: RwLockRef,
        retval: Scalar,
        dest: MPlaceTy<'tcx>,
    ) {
        let this = self.eval_context_mut();
        let thread = this.active_thread();
        let mut rwlock = rwlock_ref.0.borrow_mut();
        rwlock.reader_queue.push_back(thread);
        assert!(rwlock.is_write_locked(), "read-queueing on not write locked rwlock");
        drop(rwlock);
        this.block_thread(
            BlockReason::RwLock,
            None,
            callback!(
                @capture<'tcx> {
                    rwlock_ref: RwLockRef,
                    retval: Scalar,
                    dest: MPlaceTy<'tcx>,
                }
                |this, unblock: UnblockKind| {
                    assert_eq!(unblock, UnblockKind::Ready);
                    this.rwlock_reader_lock(&rwlock_ref);
                    this.write_scalar(retval, &dest)?;
                    interp_ok(())
                }
            ),
        );
    }

    /// Lock by setting the writer that owns the lock.
    #[inline]
    fn rwlock_writer_lock(&mut self, rwlock_ref: &RwLockRef) {
        let this = self.eval_context_mut();
        let thread = this.active_thread();
        trace!("rwlock_writer_lock: now held by {:?}", thread);

        let mut rwlock = rwlock_ref.0.borrow_mut();
        assert!(!rwlock.is_locked(), "the rwlock is already locked");
        rwlock.writer = Some(thread);
        if let Some(data_race) = this.machine.data_race.as_vclocks_ref() {
            data_race.acquire_clock(&rwlock.clock_unlocked, &this.machine.threads);
        }
    }

    /// Try to write-unlock an rwlock held by the current thread.
    /// Return `false` if the current thread does not hold the write lock.
    #[inline]
    fn rwlock_writer_unlock(&mut self, rwlock_ref: &RwLockRef) -> InterpResult<'tcx, bool> {
        let this = self.eval_context_mut();
        let thread = this.active_thread();
        let mut rwlock = rwlock_ref.0.borrow_mut();
        interp_ok(if let Some(current_writer) = rwlock.writer {
            if current_writer != thread {
                // Only the owner can unlock the rwlock.
                return interp_ok(false);
            }
            rwlock.writer = None;
            trace!("rwlock_writer_unlock: unlocked by {:?}", thread);
            // Record release clock for next lock holder.
            if let Some(data_race) = this.machine.data_race.as_vclocks_ref() {
                data_race.release_clock(&this.machine.threads, |clock| {
                    rwlock.clock_unlocked.clone_from(clock)
                });
            }

            // The thread was a writer.
            //
            // We are prioritizing writers here over the readers. As a
            // result, not only can readers starve writers, but writers can
            // also starve readers.
            if let Some(writer) = rwlock.writer_queue.pop_front() {
                drop(rwlock); // make RefCell available for unblock callback
                this.unblock_thread(writer, BlockReason::RwLock)?;
            } else {
                // Take the entire read queue and wake them all up.
                let readers = std::mem::take(&mut rwlock.reader_queue);
                drop(rwlock); // make RefCell available for unblock callback
                for reader in readers {
                    this.unblock_thread(reader, BlockReason::RwLock)?;
                }
            }
            true
        } else {
            false
        })
    }

    /// Put the writer in the queue waiting for the lock.
    /// Once the lock becomes available, `retval` will be written to `dest`.
    #[inline]
    fn rwlock_enqueue_and_block_writer(
        &mut self,
        rwlock_ref: RwLockRef,
        retval: Scalar,
        dest: MPlaceTy<'tcx>,
    ) {
        let this = self.eval_context_mut();
        let thread = this.active_thread();
        let mut rwlock = rwlock_ref.0.borrow_mut();
        rwlock.writer_queue.push_back(thread);
        assert!(rwlock.is_locked(), "write-queueing on unlocked rwlock");
        drop(rwlock);
        this.block_thread(
            BlockReason::RwLock,
            None,
            callback!(
                @capture<'tcx> {
                    rwlock_ref: RwLockRef,
                    retval: Scalar,
                    dest: MPlaceTy<'tcx>,
                }
                |this, unblock: UnblockKind| {
                    assert_eq!(unblock, UnblockKind::Ready);
                    this.rwlock_writer_lock(&rwlock_ref);
                    this.write_scalar(retval, &dest)?;
                    interp_ok(())
                }
            ),
        );
    }

    /// Release the mutex and let the current thread wait on the given condition variable.
    /// Once it is signaled, the mutex will be acquired and `retval_succ` will be written to `dest`.
    /// If the timeout happens first, `retval_timeout` will be written to `dest`.
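    ///
    /// A minimal usage sketch (illustrative only; a `pthread_cond_wait`-style call with no
    /// timeout, where the `0` return value is an assumption about the calling shim):
    /// ```ignore
    /// this.condvar_wait(
    ///     condvar_ref.clone(),
    ///     mutex_ref.clone(),
    ///     None,                // no timeout
    ///     Scalar::from_i32(0), // written to `dest` after the signal and mutex re-acquisition
    ///     Scalar::from_i32(0), // timeout return value; unreachable without a timeout
    ///     dest,
    /// )?;
    /// ```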
    fn condvar_wait(
        &mut self,
        condvar_ref: CondvarRef,
        mutex_ref: MutexRef,
        timeout: Option<(TimeoutClock, TimeoutAnchor, Duration)>,
        retval_succ: Scalar,
        retval_timeout: Scalar,
        dest: MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        if let Some(old_locked_count) = this.mutex_unlock(&mutex_ref)? {
            if old_locked_count != 1 {
                throw_unsup_format!(
                    "awaiting a condvar on a mutex acquired multiple times is not supported"
                );
            }
        } else {
            throw_ub_format!(
                "awaiting a condvar on a mutex that is unlocked or owned by a different thread"
            );
        }
        let thread = this.active_thread();

        condvar_ref.0.borrow_mut().waiters.push_back(thread);
        this.block_thread(
            BlockReason::Condvar,
            timeout,
            callback!(
                @capture<'tcx> {
                    condvar_ref: CondvarRef,
                    mutex_ref: MutexRef,
                    retval_succ: Scalar,
                    retval_timeout: Scalar,
                    dest: MPlaceTy<'tcx>,
                }
                |this, unblock: UnblockKind| {
                    match unblock {
                        UnblockKind::Ready => {
                            // The condvar was signaled. Make sure we get the clock for that.
                            if let Some(data_race) = this.machine.data_race.as_vclocks_ref() {
                                data_race.acquire_clock(
                                    &condvar_ref.0.borrow().clock,
                                    &this.machine.threads,
                                );
                            }
                            // Try to acquire the mutex.
                            // The timeout only applies to the first wait (until the signal), not for mutex acquisition.
                            this.condvar_reacquire_mutex(mutex_ref, retval_succ, dest)
                        }
                        UnblockKind::TimedOut => {
                            // We have to remove the waiter from the queue again.
                            let thread = this.active_thread();
                            let waiters = &mut condvar_ref.0.borrow_mut().waiters;
                            waiters.retain(|waiter| *waiter != thread);
                            // Now get back the lock.
                            this.condvar_reacquire_mutex(mutex_ref, retval_timeout, dest)
                        }
                    }
                }
            ),
        );
        interp_ok(())
    }

    /// Wake up some thread (if there is any) sleeping on the condition
    /// variable. Returns `true` iff any thread was woken up.
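    ///
    /// A minimal sketch of a broadcast built on top of this (illustrative only):
    /// ```ignore
    /// // Keep signalling until no waiter is left.
    /// while this.condvar_signal(&condvar_ref)? {}
    /// ```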
    fn condvar_signal(&mut self, condvar_ref: &CondvarRef) -> InterpResult<'tcx, bool> {
        let this = self.eval_context_mut();
        let mut condvar = condvar_ref.0.borrow_mut();

        // Each condvar signal happens-before the end of the condvar wake.
        if let Some(data_race) = this.machine.data_race.as_vclocks_ref() {
            data_race.release_clock(&this.machine.threads, |clock| condvar.clock.clone_from(clock));
        }
        let Some(waiter) = condvar.waiters.pop_front() else {
            return interp_ok(false);
        };
        drop(condvar);
        this.unblock_thread(waiter, BlockReason::Condvar)?;
        interp_ok(true)
    }

    /// Wait for the futex to be signaled, or a timeout. Once the thread is
    /// unblocked, `callback` is called with the unblock reason.
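    ///
    /// A minimal usage sketch (illustrative only; a FUTEX_WAIT-style call where the caller is
    /// assumed to have already checked, with a suitable atomic access, that the futex word still
    /// holds the expected value, and where the `0` return value is an assumption):
    /// ```ignore
    /// this.futex_wait(
    ///     futex_ref,
    ///     u32::MAX, // not a bitset operation: match any wake
    ///     None,     // no timeout
    ///     callback!(
    ///         @capture<'tcx> {
    ///             dest: MPlaceTy<'tcx>,
    ///         }
    ///         |this, unblock: UnblockKind| {
    ///             assert_eq!(unblock, UnblockKind::Ready);
    ///             this.write_scalar(Scalar::from_i32(0), &dest)
    ///         }
    ///     ),
    /// );
    /// ```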
    fn futex_wait(
        &mut self,
        futex_ref: FutexRef,
        bitset: u32,
        timeout: Option<(TimeoutClock, TimeoutAnchor, Duration)>,
        callback: DynUnblockCallback<'tcx>,
    ) {
        let this = self.eval_context_mut();
        let thread = this.active_thread();
        let mut futex = futex_ref.0.borrow_mut();
        let waiters = &mut futex.waiters;
        assert!(waiters.iter().all(|waiter| waiter.thread != thread), "thread is already waiting");
        waiters.push(FutexWaiter { thread, bitset });
        drop(futex);

        this.block_thread(
            BlockReason::Futex,
            timeout,
            callback!(
                @capture<'tcx> {
                    futex_ref: FutexRef,
                    callback: DynUnblockCallback<'tcx>,
                }
                |this, unblock: UnblockKind| {
                    match unblock {
                        UnblockKind::Ready => {
                            let futex = futex_ref.0.borrow();
                            // Acquire the clock of the futex.
                            if let Some(data_race) = this.machine.data_race.as_vclocks_ref() {
                                data_race.acquire_clock(&futex.clock, &this.machine.threads);
                            }
                        },
                        UnblockKind::TimedOut => {
                            // Remove the waiter from the futex.
                            let thread = this.active_thread();
                            let mut futex = futex_ref.0.borrow_mut();
                            futex.waiters.retain(|waiter| waiter.thread != thread);
                        },
                    }

                    callback.call(this, unblock)
                }
            ),
        );
    }

    /// Wake up at most `count` of the threads in the queue that match any of the bits
    /// in the bitset. Returns how many threads were woken.
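    ///
    /// A minimal usage sketch (illustrative only; a FUTEX_WAKE-style call that ignores bitsets,
    /// with the conversion of the result to a return value left to the calling shim):
    /// ```ignore
    /// let woken = this.futex_wake(&futex_ref, u32::MAX, count)?;
    /// this.write_scalar(Scalar::from_u32(u32::try_from(woken).unwrap()), &dest)?;
    /// ```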
    fn futex_wake(
        &mut self,
        futex_ref: &FutexRef,
        bitset: u32,
        count: usize,
    ) -> InterpResult<'tcx, usize> {
        let this = self.eval_context_mut();
        let mut futex = futex_ref.0.borrow_mut();

        // Each futex-wake happens-before the end of the futex wait.
        if let Some(data_race) = this.machine.data_race.as_vclocks_ref() {
            data_race.release_clock(&this.machine.threads, |clock| futex.clock.clone_from(clock));
        }

        // Remove `count` of the threads in the queue that match any of the bits in the bitset.
        // We collect all of them before unblocking because the unblock callback may access the
        // futex state to retrieve the remaining number of waiters on macOS.
        let waiters: Vec<_> =
            futex.waiters.extract_if(.., |w| w.bitset & bitset != 0).take(count).collect();
        drop(futex);

        let woken = waiters.len();
        for waiter in waiters {
            this.unblock_thread(waiter.thread, BlockReason::Futex)?;
        }

        interp_ok(woken)
    }
787}