1use std::cell::Cell;
14use std::time::Duration;
15
16use rustc_abi::Size;
17
18use crate::concurrency::sync::FutexRef;
19use crate::*;
20
/// Miri-side state of an `os_unfair_lock`.
#[derive(Clone)]
enum MacOsUnfairLock {
    /// The lock could not be mapped to a fresh Miri mutex; every operation on
    /// it degrades gracefully (see the handling in the methods below: `lock`
    /// blocks forever, `trylock` fails, `unlock` aborts, …).
    Poisoned,
    /// A healthy lock backed by one of Miri's own mutexes.
    Active { mutex_ref: MutexRef },
}
26
/// The timeout mode for `os_sync_wait_on_address` and its variants.
/// `clock_op`/`timeout_op` are the raw operands of the shim call; they are
/// only read once the variant is matched in `os_sync_wait_on_address`.
pub enum MacOsFutexTimeout<'a, 'tcx> {
    /// Wait without a timeout.
    None,
    /// Timeout measured relative to "now".
    Relative { clock_op: &'a OpTy<'tcx>, timeout_op: &'a OpTy<'tcx> },
    /// Timeout given as an absolute deadline.
    Absolute { clock_op: &'a OpTy<'tcx>, timeout_op: &'a OpTy<'tcx> },
}
32
/// Miri-side state for one `os_sync_wait_on_address` futex address.
struct MacOsFutex {
    futex: FutexRef,
    /// Operand size (4 or 8 bytes) used by the threads currently waiting on
    /// this address; updated whenever the waiter count is zero.
    size: Cell<u64>,
    /// Whether the current waiters passed the `…_SHARED` flag; like `size`,
    /// only reset while no thread is waiting.
    shared: Cell<bool>,
}
46
impl<'tcx> EvalContextExtPriv<'tcx> for crate::MiriInterpCx<'tcx> {}
trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
    /// Returns the lazily-initialized Miri-side state for the `os_unfair_lock`
    /// that `lock_ptr` points to, creating it on first access.
    fn os_unfair_lock_get_data<'a>(
        &'a mut self,
        lock_ptr: &OpTy<'tcx>,
    ) -> InterpResult<'tcx, &'a MacOsUnfairLock>
    where
        'tcx: 'a,
    {
        let this = self.eval_context_mut();
        // Interpret the pointee with the libc `os_unfair_lock_s` layout; the
        // sync state is keyed at offset 0 of that place.
        let lock = this.deref_pointer_as(lock_ptr, this.libc_ty_layout("os_unfair_lock_s"))?;
        this.lazy_sync_get_data(
            &lock,
            Size::ZERO, || {
                // NOTE(review): this closure appears to be the "memory already
                // contains unexpected data" path of `lazy_sync_get_data`; such
                // locks are treated as poisoned — confirm against that
                // function's contract.
                interp_ok(MacOsUnfairLock::Poisoned)
            },
            // Fresh lock: back it with a brand-new Miri mutex.
            |_| interp_ok(MacOsUnfairLock::Active { mutex_ref: MutexRef::new() }),
        )
    }
}
75
76impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
77pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
    /// Implements `os_sync_wait_on_address` and its timeout/deadline variants:
    /// block the current thread on the futex at `addr_op`, but only if the
    /// value stored there still equals `value_op`.
    ///
    /// On success, writes the number of remaining waiters (as `i32`) to `dest`;
    /// on failure, sets `errno` (EINVAL for bad arguments or mismatched
    /// size/shared parameters, ETIMEDOUT when the timeout fires) and returns
    /// through `set_last_error_and_return`.
    fn os_sync_wait_on_address(
        &mut self,
        addr_op: &OpTy<'tcx>,
        value_op: &OpTy<'tcx>,
        size_op: &OpTy<'tcx>,
        flags_op: &OpTy<'tcx>,
        timeout: MacOsFutexTimeout<'_, 'tcx>,
        dest: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let none = this.eval_libc_u32("OS_SYNC_WAIT_ON_ADDRESS_NONE");
        let shared = this.eval_libc_u32("OS_SYNC_WAIT_ON_ADDRESS_SHARED");
        let absolute_clock = this.eval_libc_u32("OS_CLOCK_MACH_ABSOLUTE_TIME");

        let ptr = this.read_pointer(addr_op)?;
        let value = this.read_scalar(value_op)?.to_u64()?;
        let size = this.read_target_usize(size_op)?;
        let flags = this.read_scalar(flags_op)?.to_u32()?;

        // Normalize the three timeout variants into (clock id, anchor, raw timeout).
        let clock_timeout = match timeout {
            MacOsFutexTimeout::None => None,
            MacOsFutexTimeout::Relative { clock_op, timeout_op } => {
                let clock = this.read_scalar(clock_op)?.to_u32()?;
                let timeout = this.read_scalar(timeout_op)?.to_u64()?;
                Some((clock, TimeoutAnchor::Relative, timeout))
            }
            MacOsFutexTimeout::Absolute { clock_op, timeout_op } => {
                let clock = this.read_scalar(clock_op)?.to_u32()?;
                let timeout = this.read_scalar(timeout_op)?.to_u64()?;
                Some((clock, TimeoutAnchor::Absolute, timeout))
            }
        };

        let addr = ptr.addr().bytes();
        // Argument validation: non-null, size 4 or 8, address aligned to the
        // operand size, a known flag, and (if a timeout was given) the
        // mach-absolute clock with a non-zero timeout. Anything else is EINVAL.
        if addr == 0
            || !matches!(size, 4 | 8)
            || !addr.is_multiple_of(size)
            || (flags != none && flags != shared)
            || clock_timeout
                .is_some_and(|(clock, _, timeout)| clock != absolute_clock || timeout == 0)
        {
            this.set_last_error_and_return(LibcError("EINVAL"), dest)?;
            return interp_ok(());
        }

        let is_shared = flags == shared;
        // The raw timeout value is interpreted as nanoseconds on Miri's
        // monotonic clock. (NOTE(review): real mach_absolute_time units are
        // hardware-dependent; nanoseconds appears to be the deliberate
        // approximation here — confirm.)
        let timeout = clock_timeout.map(|(_, anchor, timeout)| {
            (TimeoutClock::Monotonic, anchor, Duration::from_nanos(timeout))
        });

        // Fence before the atomic load, matching the fence on the wake side.
        this.atomic_fence(AtomicFenceOrd::SeqCst)?;

        // Read the current futex value (4 or 8 bytes) with acquire ordering.
        let layout = this.machine.layouts.uint(Size::from_bytes(size)).unwrap();
        let futex_val = this
            .read_scalar_atomic(&this.ptr_to_mplace(ptr, layout), AtomicReadOrd::Acquire)?
            .to_bits(Size::from_bytes(size))?;

        // Get (or create) the per-address futex state.
        let futex = this
            .get_sync_or_init(ptr, |_| {
                MacOsFutex {
                    futex: Default::default(),
                    size: Cell::new(size),
                    shared: Cell::new(is_shared),
                }
            })
            .unwrap();

        // With no waiters, this call defines the futex's size/shared
        // parameters; otherwise they must match what the existing waiters used.
        if futex.futex.waiters() == 0 {
            futex.size.set(size);
            futex.shared.set(is_shared);
        } else if futex.size.get() != size || futex.shared.get() != is_shared {
            this.set_last_error_and_return(LibcError("EINVAL"), dest)?;
            return interp_ok(());
        }

        // Only block if the value still matches the expectation (the u64
        // `value` is zero-extended for comparison against the raw bits).
        if futex_val == value.into() {
            let futex_ref = futex.futex.clone();
            let dest = dest.clone();
            this.futex_wait(
                futex_ref.clone(),
                u32::MAX, timeout,
                callback!(
                    @capture<'tcx> {
                        dest: MPlaceTy<'tcx>,
                        futex_ref: FutexRef,
                    }
                    |this, unblock: UnblockKind| {
                        match unblock {
                            // Woken normally: return the remaining waiter count.
                            UnblockKind::Ready => {
                                let remaining = futex_ref.waiters().try_into().unwrap();
                                this.write_scalar(Scalar::from_i32(remaining), &dest)
                            }
                            // Timeout expired: fail with ETIMEDOUT.
                            UnblockKind::TimedOut => {
                                this.set_last_error_and_return(LibcError("ETIMEDOUT"), &dest)
                            }
                        }
                    }
                ),
            );
        } else {
            // Value changed concurrently: don't block, report current waiters.
            let waiters = futex.futex.waiters().try_into().unwrap();
            this.write_scalar(Scalar::from_i32(waiters), dest)?;
        }

        interp_ok(())
    }
203
    /// Implements `os_sync_wake_by_address_any` (`all == false`) and
    /// `os_sync_wake_by_address_all` (`all == true`): wake one or all threads
    /// blocked on the futex at `addr_op`.
    ///
    /// Writes 0 to `dest` on success; sets `errno` to EINVAL for bad
    /// arguments or mismatched size/shared parameters, and ENOENT when no
    /// thread is waiting on the address.
    fn os_sync_wake_by_address(
        &mut self,
        addr_op: &OpTy<'tcx>,
        size_op: &OpTy<'tcx>,
        flags_op: &OpTy<'tcx>,
        all: bool,
        dest: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let none = this.eval_libc_u32("OS_SYNC_WAKE_BY_ADDRESS_NONE");
        let shared = this.eval_libc_u32("OS_SYNC_WAKE_BY_ADDRESS_SHARED");

        let ptr = this.read_pointer(addr_op)?;
        let size = this.read_target_usize(size_op)?;
        let flags = this.read_scalar(flags_op)?.to_u32()?;

        let addr = ptr.addr().bytes();
        // Argument validation: non-null address, size 4 or 8, known flag.
        // (Unlike the wait side, alignment is not checked here.)
        if addr == 0 || !matches!(size, 4 | 8) || (flags != none && flags != shared) {
            this.set_last_error_and_return(LibcError("EINVAL"), dest)?;
            return interp_ok(());
        }

        let is_shared = flags == shared;

        // No futex state for this address at all means nobody ever waited:
        // ENOENT.
        let Some(futex) = this.get_sync_or_init(ptr, |_| {
            MacOsFutex {
                futex: Default::default(),
                size: Cell::new(size),
                shared: Cell::new(is_shared),
            }
        }) else {
            this.set_last_error_and_return(LibcError("ENOENT"), dest)?;
            return interp_ok(());
        };

        if futex.futex.waiters() == 0 {
            // State exists but nobody is currently waiting: also ENOENT.
            this.set_last_error_and_return(LibcError("ENOENT"), dest)?;
            return interp_ok(());
        } else if futex.size.get() != size || futex.shared.get() != is_shared {
            // Size/shared parameters must match what the waiters used.
            this.set_last_error_and_return(LibcError("EINVAL"), dest)?;
            return interp_ok(());
        }

        let futex_ref = futex.futex.clone();

        // Fence matching the one on the wait side, then wake one or all
        // waiters (full bitset: macOS futexes have no wake bitmask).
        this.atomic_fence(AtomicFenceOrd::SeqCst)?;
        this.futex_wake(&futex_ref, u32::MAX, if all { usize::MAX } else { 1 })?;
        this.write_scalar(Scalar::from_i32(0), dest)?;
        interp_ok(())
    }
269
270 fn os_unfair_lock_lock(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
271 let this = self.eval_context_mut();
272
273 let MacOsUnfairLock::Active { mutex_ref } = this.os_unfair_lock_get_data(lock_op)? else {
274 this.block_thread(
276 BlockReason::Sleep,
277 None,
278 callback!(
279 @capture<'tcx> {}
280 |_this, _unblock: UnblockKind| {
281 panic!("we shouldn't wake up ever")
282 }
283 ),
284 );
285 return interp_ok(());
286 };
287 let mutex_ref = mutex_ref.clone();
288
289 if let Some(owner) = mutex_ref.owner() {
290 if owner == this.active_thread() {
291 throw_machine_stop!(TerminationInfo::Abort(
293 "attempted to lock an os_unfair_lock that is already locked by the current thread".to_owned()
294 ));
295 }
296
297 this.mutex_enqueue_and_block(mutex_ref, None);
298 } else {
299 this.mutex_lock(&mutex_ref);
300 }
301
302 interp_ok(())
303 }
304
305 fn os_unfair_lock_trylock(
306 &mut self,
307 lock_op: &OpTy<'tcx>,
308 dest: &MPlaceTy<'tcx>,
309 ) -> InterpResult<'tcx> {
310 let this = self.eval_context_mut();
311
312 let MacOsUnfairLock::Active { mutex_ref } = this.os_unfair_lock_get_data(lock_op)? else {
313 this.write_scalar(Scalar::from_bool(false), dest)?;
315 return interp_ok(());
316 };
317 let mutex_ref = mutex_ref.clone();
318
319 if mutex_ref.owner().is_some() {
320 this.write_scalar(Scalar::from_bool(false), dest)?;
323 } else {
324 this.mutex_lock(&mutex_ref);
325 this.write_scalar(Scalar::from_bool(true), dest)?;
326 }
327
328 interp_ok(())
329 }
330
331 fn os_unfair_lock_unlock(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
332 let this = self.eval_context_mut();
333
334 let MacOsUnfairLock::Active { mutex_ref } = this.os_unfair_lock_get_data(lock_op)? else {
335 throw_machine_stop!(TerminationInfo::Abort(
337 "attempted to unlock an os_unfair_lock not owned by the current thread".to_owned()
338 ));
339 };
340 let mutex_ref = mutex_ref.clone();
341
342 if this.mutex_unlock(&mutex_ref)?.is_none() {
344 throw_machine_stop!(TerminationInfo::Abort(
346 "attempted to unlock an os_unfair_lock not owned by the current thread".to_owned()
347 ));
348 }
349
350 if mutex_ref.owner().is_none() {
353 let lock_place = this.deref_pointer_as(lock_op, this.machine.layouts.u32)?;
354 this.write_scalar_atomic(Scalar::from_u32(0), &lock_place, AtomicWriteOrd::Relaxed)?;
355 }
356
357 interp_ok(())
358 }
359
360 fn os_unfair_lock_assert_owner(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
361 let this = self.eval_context_mut();
362
363 let MacOsUnfairLock::Active { mutex_ref } = this.os_unfair_lock_get_data(lock_op)? else {
364 throw_machine_stop!(TerminationInfo::Abort(
366 "called os_unfair_lock_assert_owner on an os_unfair_lock not owned by the current thread".to_owned()
367 ));
368 };
369 let mutex_ref = mutex_ref.clone();
370
371 if mutex_ref.owner().is_none_or(|o| o != this.active_thread()) {
372 throw_machine_stop!(TerminationInfo::Abort(
373 "called os_unfair_lock_assert_owner on an os_unfair_lock not owned by the current thread".to_owned()
374 ));
375 }
376
377 interp_ok(())
380 }
381
382 fn os_unfair_lock_assert_not_owner(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
383 let this = self.eval_context_mut();
384
385 let MacOsUnfairLock::Active { mutex_ref } = this.os_unfair_lock_get_data(lock_op)? else {
386 return interp_ok(());
388 };
389 let mutex_ref = mutex_ref.clone();
390
391 if mutex_ref.owner().is_some_and(|o| o == this.active_thread()) {
392 throw_machine_stop!(TerminationInfo::Abort(
393 "called os_unfair_lock_assert_not_owner on an os_unfair_lock owned by the current thread".to_owned()
394 ));
395 }
396
397 if mutex_ref.owner().is_none() {
400 let lock_place = this.deref_pointer_as(lock_op, this.machine.layouts.u32)?;
401 this.write_scalar_atomic(Scalar::from_u32(0), &lock_place, AtomicWriteOrd::Relaxed)?;
402 }
403
404 interp_ok(())
405 }
406}