miri/shims/unix/macos/sync.rs

//! Contains macOS-specific synchronization functions.
//!
//! For `os_unfair_lock`, see the documentation
//! <https://developer.apple.com/documentation/os/synchronization?language=objc>
//! and in case of underspecification its implementation
//! <https://github.com/apple-oss-distributions/libplatform/blob/a00a4cc36da2110578bcf3b8eeeeb93dcc7f4e11/src/os/lock.c#L645>.
//!
//! Note that we don't emulate every edge-case behaviour of the locks. Notably,
//! we don't abort when locking a lock owned by a thread that has already exited
//! and we do not detect copying of the lock, but macOS doesn't guarantee anything
//! in that case either.
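//!
//! A guest program reaches these shims through the ordinary C API. A minimal
//! sketch (assuming the `libc` crate's macOS bindings for `os_unfair_lock`):
//!
//! ```rust,ignore (macOS only)
//! let mut lock = libc::OS_UNFAIR_LOCK_INIT;
//! unsafe {
//!     libc::os_unfair_lock_lock(&mut lock);
//!     libc::os_unfair_lock_assert_owner(&mut lock);
//!     libc::os_unfair_lock_unlock(&mut lock);
//! }
//! ```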

use std::cell::Cell;
use std::time::Duration;

use rustc_abi::Size;

use crate::concurrency::sync::FutexRef;
use crate::*;

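/// The state of a (lazily initialized) `os_unfair_lock`: either a live mutex,
/// or `Poisoned` if we detected that the lock was moved while it was held
/// (see `os_unfair_lock_get_data` below).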
#[derive(Clone)]
enum MacOsUnfairLock {
    Poisoned,
    Active { mutex_ref: MutexRef },
}

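/// The timeout passed to a macOS futex wait, mirroring the three
/// `os_sync_wait_on_address*` entry points: no timeout at all, a timeout
/// relative to now, and an absolute deadline. The `OpTy`s are the raw clock
/// and timeout operands, which still need to be read from the guest.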
pub enum MacOsFutexTimeout<'a, 'tcx> {
    None,
    Relative { clock_op: &'a OpTy<'tcx>, timeout_op: &'a OpTy<'tcx> },
    Absolute { clock_op: &'a OpTy<'tcx>, timeout_op: &'a OpTy<'tcx> },
}

/// Metadata for a macOS futex.
///
/// Since macOS 14.4, Apple has exposed the previously private futex API consisting
/// of `os_sync_wait_on_address` (and friends) and `os_sync_wake_by_address_{any, all}`.
/// These work with different value sizes and flags, which are validated to be consistent.
/// This structure keeps track of both the futex queue and these values.
struct MacOsFutex {
    futex: FutexRef,
    /// The size in bytes of the atomic primitive underlying this futex.
    size: Cell<u64>,
    /// Whether the futex is shared across process boundaries.
    shared: Cell<bool>,
}

impl<'tcx> EvalContextExtPriv<'tcx> for crate::MiriInterpCx<'tcx> {}
trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
    fn os_unfair_lock_get_data<'a>(
        &'a mut self,
        lock_ptr: &OpTy<'tcx>,
    ) -> InterpResult<'tcx, &'a MacOsUnfairLock>
    where
        'tcx: 'a,
    {
        let this = self.eval_context_mut();
        let lock = this.deref_pointer_as(lock_ptr, this.libc_ty_layout("os_unfair_lock_s"))?;
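        // The actual lock state lives in Miri's side table, not in guest memory:
        // the in-memory `os_unfair_lock_s` only carries the init cookie that
        // `lazy_sync_get_data` uses to decide whether this lock already has
        // associated data (first closure: cookie set but data missing; second
        // closure: fresh initialization).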
        this.lazy_sync_get_data(
            &lock,
            Size::ZERO, // offset for init tracking
            || {
                // If we get here, due to how we reset things to zero in `os_unfair_lock_unlock`,
                // this means the lock was moved while locked. This can happen with a `std` lock,
                // but then any future attempt to unlock will just deadlock. In practice, terrible
                // things can probably happen if you swap two locked locks, since they'd wake up
                // from the wrong queue... we just won't catch all UB of this library API then (we
                // would need to store some unique identifier in-memory for this, instead of a static
                // LAZY_INIT_COOKIE). This can't be hit via `std::sync::Mutex`.
                interp_ok(MacOsUnfairLock::Poisoned)
            },
            |_| interp_ok(MacOsUnfairLock::Active { mutex_ref: MutexRef::new() }),
        )
    }
}

impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
    /// Implements [`os_sync_wait_on_address`], [`os_sync_wait_on_address_with_deadline`]
    /// and [`os_sync_wait_on_address_with_timeout`].
    ///
    /// [`os_sync_wait_on_address`]: https://developer.apple.com/documentation/os/os_sync_wait_on_address?language=objc
    /// [`os_sync_wait_on_address_with_deadline`]: https://developer.apple.com/documentation/os/os_sync_wait_on_address_with_deadline?language=objc
    /// [`os_sync_wait_on_address_with_timeout`]: https://developer.apple.com/documentation/os/os_sync_wait_on_address_with_timeout?language=objc
    fn os_sync_wait_on_address(
        &mut self,
        addr_op: &OpTy<'tcx>,
        value_op: &OpTy<'tcx>,
        size_op: &OpTy<'tcx>,
        flags_op: &OpTy<'tcx>,
        timeout: MacOsFutexTimeout<'_, 'tcx>,
        dest: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let none = this.eval_libc_u32("OS_SYNC_WAIT_ON_ADDRESS_NONE");
        let shared = this.eval_libc_u32("OS_SYNC_WAIT_ON_ADDRESS_SHARED");
        let absolute_clock = this.eval_libc_u32("OS_CLOCK_MACH_ABSOLUTE_TIME");

        let ptr = this.read_pointer(addr_op)?;
        let value = this.read_scalar(value_op)?.to_u64()?;
        let size = this.read_target_usize(size_op)?;
        let flags = this.read_scalar(flags_op)?.to_u32()?;

        let clock_timeout = match timeout {
            MacOsFutexTimeout::None => None,
            MacOsFutexTimeout::Relative { clock_op, timeout_op } => {
                let clock = this.read_scalar(clock_op)?.to_u32()?;
                let timeout = this.read_scalar(timeout_op)?.to_u64()?;
                Some((clock, TimeoutAnchor::Relative, timeout))
            }
            MacOsFutexTimeout::Absolute { clock_op, timeout_op } => {
                let clock = this.read_scalar(clock_op)?.to_u32()?;
                let timeout = this.read_scalar(timeout_op)?.to_u64()?;
                Some((clock, TimeoutAnchor::Absolute, timeout))
            }
        };

        // Perform validation of the arguments.
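        // We reject a null or misaligned address, a size other than 4 or 8 bytes,
        // an unknown flag value, and (for the timed variants) an unsupported clock
        // or a zero timeout, all with EINVAL.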
        let addr = ptr.addr().bytes();
        if addr == 0
            || !matches!(size, 4 | 8)
            || !addr.is_multiple_of(size)
            || (flags != none && flags != shared)
            || clock_timeout
                .is_some_and(|(clock, _, timeout)| clock != absolute_clock || timeout == 0)
        {
            this.set_last_error_and_return(LibcError("EINVAL"), dest)?;
            return interp_ok(());
        }

        let is_shared = flags == shared;
        let timeout = clock_timeout.map(|(_, anchor, timeout)| {
            // The only clock that is currently supported is the monotonic clock.
            // While the deadline argument of `os_sync_wait_on_address_with_deadline`
            // is actually not in nanoseconds but in the units of `mach_absolute_time`,
            // the two are equivalent in Miri.
            (TimeoutClock::Monotonic, anchor, Duration::from_nanos(timeout))
        });

        // See the Linux futex implementation for why this fence exists.
        this.atomic_fence(AtomicFenceOrd::SeqCst)?;

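        // Load the value currently stored at the address, using an unsigned
        // integer layout of the requested operand size.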
        let layout = this.machine.layouts.uint(Size::from_bytes(size)).unwrap();
        let futex_val = this
            .read_scalar_atomic(&this.ptr_to_mplace(ptr, layout), AtomicReadOrd::Acquire)?
            .to_bits(Size::from_bytes(size))?;

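        // Fetch the futex queue for this address, creating it with the current
        // parameters if this is the first time the address is used.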
        let futex = this
            .get_sync_or_init(ptr, |_| {
                MacOsFutex {
                    futex: Default::default(),
                    size: Cell::new(size),
                    shared: Cell::new(is_shared),
                }
            })
            .unwrap();

        // Detect mismatches between the flags and sizes used on this address
        // by comparing it with the parameters used by the other waiters in
        // the current list. If the list is currently empty, update those
        // parameters.
        if futex.futex.waiters() == 0 {
            futex.size.set(size);
            futex.shared.set(is_shared);
        } else if futex.size.get() != size || futex.shared.get() != is_shared {
            this.set_last_error_and_return(LibcError("EINVAL"), dest)?;
            return interp_ok(());
        }

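        // Both paths below report the number of (other) waiters still blocked on
        // the address in `dest`: the unblock callback does so after waking, and
        // the no-block path does so immediately.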
        if futex_val == value.into() {
            // If the values are the same, we have to block.
            let futex_ref = futex.futex.clone();
            let dest = dest.clone();
            this.futex_wait(
                futex_ref.clone(),
                u32::MAX, // bitset
                timeout,
                callback!(
                    @capture<'tcx> {
                        dest: MPlaceTy<'tcx>,
                        futex_ref: FutexRef,
                    }
                    |this, unblock: UnblockKind| {
                        match unblock {
                            UnblockKind::Ready => {
                                let remaining = futex_ref.waiters().try_into().unwrap();
                                this.write_scalar(Scalar::from_i32(remaining), &dest)
                            }
                            UnblockKind::TimedOut => {
                                this.set_last_error_and_return(LibcError("ETIMEDOUT"), &dest)
                            }
                        }
                    }
                ),
            );
        } else {
            // else retrieve the current number of waiters.
            let waiters = futex.futex.waiters().try_into().unwrap();
            this.write_scalar(Scalar::from_i32(waiters), dest)?;
        }

        interp_ok(())
    }

    /// Implements [`os_sync_wake_by_address_all`] and [`os_sync_wake_by_address_any`].
    ///
    /// [`os_sync_wake_by_address_all`]: https://developer.apple.com/documentation/os/os_sync_wake_by_address_all?language=objc
    /// [`os_sync_wake_by_address_any`]: https://developer.apple.com/documentation/os/os_sync_wake_by_address_any?language=objc
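    ///
    /// Writes 0 to `dest` on success; fails with `ENOENT` if there is no waiter
    /// on the address and with `EINVAL` if the arguments are invalid or
    /// inconsistent with those used by the waiters.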
    fn os_sync_wake_by_address(
        &mut self,
        addr_op: &OpTy<'tcx>,
        size_op: &OpTy<'tcx>,
        flags_op: &OpTy<'tcx>,
        all: bool,
        dest: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let none = this.eval_libc_u32("OS_SYNC_WAKE_BY_ADDRESS_NONE");
        let shared = this.eval_libc_u32("OS_SYNC_WAKE_BY_ADDRESS_SHARED");

        let ptr = this.read_pointer(addr_op)?;
        let size = this.read_target_usize(size_op)?;
        let flags = this.read_scalar(flags_op)?.to_u32()?;

        // Perform validation of the arguments.
        let addr = ptr.addr().bytes();
        if addr == 0 || !matches!(size, 4 | 8) || (flags != none && flags != shared) {
            this.set_last_error_and_return(LibcError("EINVAL"), dest)?;
            return interp_ok(());
        }

        let is_shared = flags == shared;

        let Some(futex) = this.get_sync_or_init(ptr, |_| {
            MacOsFutex {
                futex: Default::default(),
                size: Cell::new(size),
                shared: Cell::new(is_shared),
            }
        }) else {
            // No AllocId, or no live allocation at that AllocId. Return an
            // error code. (That seems nicer than silently doing something
            // non-intuitive.) This means that if an address gets reused by a
            // new allocation, we'll use an independent futex queue for this...
            // that seems acceptable.
            this.set_last_error_and_return(LibcError("ENOENT"), dest)?;
            return interp_ok(());
        };

        if futex.futex.waiters() == 0 {
            this.set_last_error_and_return(LibcError("ENOENT"), dest)?;
            return interp_ok(());
        // If there are waiters in the queue, they have all used the parameters
        // stored in `futex` (we check this in `os_sync_wait_on_address` above).
        // Detect mismatches between "our" parameters and the parameters used by
        // the waiters and return an error in that case.
        } else if futex.size.get() != size || futex.shared.get() != is_shared {
            this.set_last_error_and_return(LibcError("EINVAL"), dest)?;
            return interp_ok(());
        }

        let futex_ref = futex.futex.clone();

        // See the Linux futex implementation for why this fence exists.
        this.atomic_fence(AtomicFenceOrd::SeqCst)?;
        this.futex_wake(&futex_ref, u32::MAX, if all { usize::MAX } else { 1 })?;
        this.write_scalar(Scalar::from_i32(0), dest)?;
        interp_ok(())
    }

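    /// Implements `os_unfair_lock_lock`: blocks until the lock is acquired,
    /// aborting on reentrant locking like the real implementation does.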
    fn os_unfair_lock_lock(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let MacOsUnfairLock::Active { mutex_ref } = this.os_unfair_lock_get_data(lock_op)? else {
            // Trying to get a poisoned lock. Just block forever...
            this.block_thread(
                BlockReason::Sleep,
                None,
                callback!(
                    @capture<'tcx> {}
                    |_this, _unblock: UnblockKind| {
                        panic!("we shouldn't wake up ever")
                    }
                ),
            );
            return interp_ok(());
        };
        let mutex_ref = mutex_ref.clone();

        if let Some(owner) = mutex_ref.owner() {
            if owner == this.active_thread() {
                // Matching the current macOS implementation: abort on reentrant locking.
                throw_machine_stop!(TerminationInfo::Abort(
                    "attempted to lock an os_unfair_lock that is already locked by the current thread".to_owned()
                ));
            }

            this.mutex_enqueue_and_block(mutex_ref, None);
        } else {
            this.mutex_lock(&mutex_ref);
        }

        interp_ok(())
    }

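    /// Implements `os_unfair_lock_trylock`: writes to `dest` whether the lock
    /// could be acquired without blocking.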
    fn os_unfair_lock_trylock(
        &mut self,
        lock_op: &OpTy<'tcx>,
        dest: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let MacOsUnfairLock::Active { mutex_ref } = this.os_unfair_lock_get_data(lock_op)? else {
            // Trying to get a poisoned lock. That never works.
            this.write_scalar(Scalar::from_bool(false), dest)?;
            return interp_ok(());
        };
        let mutex_ref = mutex_ref.clone();

        if mutex_ref.owner().is_some() {
            // Contrary to the blocking lock function, this does not check for
            // reentrancy.
            this.write_scalar(Scalar::from_bool(false), dest)?;
        } else {
            this.mutex_lock(&mutex_ref);
            this.write_scalar(Scalar::from_bool(true), dest)?;
        }

        interp_ok(())
    }

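    /// Implements `os_unfair_lock_unlock`: releases the lock, aborting if the
    /// current thread does not own it.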
    fn os_unfair_lock_unlock(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let MacOsUnfairLock::Active { mutex_ref } = this.os_unfair_lock_get_data(lock_op)? else {
            // The lock is poisoned, who knows who owns it... we'll pretend: someone else.
            throw_machine_stop!(TerminationInfo::Abort(
                "attempted to unlock an os_unfair_lock not owned by the current thread".to_owned()
            ));
        };
        let mutex_ref = mutex_ref.clone();

        // Now, unlock.
        if this.mutex_unlock(&mutex_ref)?.is_none() {
            // Matching the current macOS implementation: abort.
            throw_machine_stop!(TerminationInfo::Abort(
                "attempted to unlock an os_unfair_lock not owned by the current thread".to_owned()
            ));
        }

        // If the lock is not locked by anyone now, it went quiet.
        // Reset to zero so that it can be moved and initialized again for the next phase.
        if mutex_ref.owner().is_none() {
            let lock_place = this.deref_pointer_as(lock_op, this.machine.layouts.u32)?;
            this.write_scalar_atomic(Scalar::from_u32(0), &lock_place, AtomicWriteOrd::Relaxed)?;
        }

        interp_ok(())
    }

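    /// Implements `os_unfair_lock_assert_owner`: aborts unless the lock is held
    /// by the current thread.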
    fn os_unfair_lock_assert_owner(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let MacOsUnfairLock::Active { mutex_ref } = this.os_unfair_lock_get_data(lock_op)? else {
            // The lock is poisoned, who knows who owns it... we'll pretend: someone else.
            throw_machine_stop!(TerminationInfo::Abort(
                "called os_unfair_lock_assert_owner on an os_unfair_lock not owned by the current thread".to_owned()
            ));
        };
        let mutex_ref = mutex_ref.clone();

        if mutex_ref.owner().is_none_or(|o| o != this.active_thread()) {
            throw_machine_stop!(TerminationInfo::Abort(
                "called os_unfair_lock_assert_owner on an os_unfair_lock not owned by the current thread".to_owned()
            ));
        }

        // The lock is definitely not quiet since we are the owner.

        interp_ok(())
    }

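    /// Implements `os_unfair_lock_assert_not_owner`: aborts if the lock is held
    /// by the current thread.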
    fn os_unfair_lock_assert_not_owner(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let MacOsUnfairLock::Active { mutex_ref } = this.os_unfair_lock_get_data(lock_op)? else {
            // The lock is poisoned, who knows who owns it... we'll pretend: someone else.
            return interp_ok(());
        };
        let mutex_ref = mutex_ref.clone();

        if mutex_ref.owner().is_some_and(|o| o == this.active_thread()) {
            throw_machine_stop!(TerminationInfo::Abort(
                "called os_unfair_lock_assert_not_owner on an os_unfair_lock owned by the current thread".to_owned()
            ));
        }

        // If the lock is not locked by anyone now, it went quiet.
        // Reset to zero so that it can be moved and initialized again for the next phase.
        if mutex_ref.owner().is_none() {
            let lock_place = this.deref_pointer_as(lock_op, this.machine.layouts.u32)?;
            this.write_scalar_atomic(Scalar::from_u32(0), &lock_place, AtomicWriteOrd::Relaxed)?;
        }

        interp_ok(())
    }
}