use rustc_abi::Size;

use crate::concurrency::sync::LAZY_INIT_COOKIE;
use crate::*;

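/// Do a bytewise comparison of the two places, using relaxed atomic reads. This is used to check if
/// a synchronization primitive matches its static initializer value.
///
/// The reads happen in chunks of 4 bytes, so all racing accesses must also use that access size.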
fn bytewise_equal_atomic_relaxed<'tcx>(
    ecx: &MiriInterpCx<'tcx>,
    left: &MPlaceTy<'tcx>,
    right: &MPlaceTy<'tcx>,
) -> InterpResult<'tcx, bool> {
    let size = left.layout.size;
    assert_eq!(size, right.layout.size);

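    // We do this in chunks of 4, so that we are okay to race with (sufficiently aligned)
    // 4-byte atomic accesses.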
    assert!(size.bytes().is_multiple_of(4));
    for i in 0..(size.bytes() / 4) {
        let offset = Size::from_bytes(i.strict_mul(4));
        let load = |place: &MPlaceTy<'tcx>| {
            let byte = place.offset(offset, ecx.machine.layouts.u32, ecx)?;
            ecx.read_scalar_atomic(&byte, AtomicReadOrd::Relaxed)?.to_u32()
        };
        let left = load(left)?;
        let right = load(right)?;
        if left != right {
            return interp_ok(false);
        }
    }

    interp_ok(true)
}

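// # pthread_mutexattr_t
// We store some data directly inside the type, ignoring the platform layout:
// - kind: i32
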
#[inline]
fn mutexattr_kind_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, u64> {
    interp_ok(match &*ecx.tcx.sess.target.os {
        "linux" | "illumos" | "solaris" | "macos" | "freebsd" | "android" => 0,
        os => throw_unsup_format!("`pthread_mutexattr` is not supported on {os}"),
    })
}

fn mutexattr_get_kind<'tcx>(
    ecx: &MiriInterpCx<'tcx>,
    attr_ptr: &OpTy<'tcx>,
) -> InterpResult<'tcx, i32> {
    ecx.deref_pointer_and_read(
        attr_ptr,
        mutexattr_kind_offset(ecx)?,
        ecx.libc_ty_layout("pthread_mutexattr_t"),
        ecx.machine.layouts.i32,
    )?
    .to_i32()
}

fn mutexattr_set_kind<'tcx>(
    ecx: &mut MiriInterpCx<'tcx>,
    attr_ptr: &OpTy<'tcx>,
    kind: i32,
) -> InterpResult<'tcx, ()> {
    ecx.deref_pointer_and_write(
        attr_ptr,
        mutexattr_kind_offset(ecx)?,
        Scalar::from_i32(kind),
        ecx.libc_ty_layout("pthread_mutexattr_t"),
        ecx.machine.layouts.i32,
    )
}

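/// To differentiate "the mutex kind has not been changed" from
/// "the mutex kind has been set to PTHREAD_MUTEX_DEFAULT and that happens
/// to equal some other mutex kind", the field is initialized to this
/// sentinel value, which is not a valid mutex kind.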
const PTHREAD_MUTEX_KIND_UNCHANGED: i32 = 0x8000000;

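/// Translates the mutex kind from what is stored in pthread_mutexattr_t to our `MutexKind` enum.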
fn mutexattr_translate_kind<'tcx>(
    ecx: &MiriInterpCx<'tcx>,
    kind: i32,
) -> InterpResult<'tcx, MutexKind> {
    interp_ok(if kind == (ecx.eval_libc_i32("PTHREAD_MUTEX_NORMAL")) {
        MutexKind::Normal
    } else if kind == ecx.eval_libc_i32("PTHREAD_MUTEX_ERRORCHECK") {
        MutexKind::ErrorCheck
    } else if kind == ecx.eval_libc_i32("PTHREAD_MUTEX_RECURSIVE") {
        MutexKind::Recursive
    } else if kind == ecx.eval_libc_i32("PTHREAD_MUTEX_DEFAULT")
        || kind == PTHREAD_MUTEX_KIND_UNCHANGED
    {
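        // The "unchanged" sentinel also maps to the default kind: an attribute
        // whose kind was never set behaves like PTHREAD_MUTEX_DEFAULT.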
        MutexKind::Default
    } else {
        throw_unsup_format!("unsupported type of mutex: {kind}");
    })
}

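// # pthread_mutex_t
// We store some data directly inside the type, ignoring the platform layout:
// - init: u32 (the lazy-init cookie)

/// The mutex kind.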
#[derive(Debug, Clone, Copy)]
enum MutexKind {
    Normal,
    Default,
    Recursive,
    ErrorCheck,
}

#[derive(Debug, Clone)]
struct PthreadMutex {
    mutex_ref: MutexRef,
    kind: MutexKind,
}

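/// The offset within `pthread_mutex_t` at which we store the lazy-init cookie
/// (see `LAZY_INIT_COOKIE`). The sanity check below ensures that no static
/// initializer legitimately stores the cookie value at this offset.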
fn mutex_init_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, Size> {
    let offset = match &*ecx.tcx.sess.target.os {
        "linux" | "illumos" | "solaris" | "freebsd" | "android" => 0,
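        // macOS stores a signature in the first bytes, so we have to move to offset 4.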
        "macos" => 4,
        os => throw_unsup_format!("`pthread_mutex` is not supported on {os}"),
    };
    let offset = Size::from_bytes(offset);

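    // Sanity-check the static initializers (but only once per run): the `init` field
    // must start out *not* equal to LAZY_INIT_COOKIE.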
    if !ecx.machine.pthread_mutex_sanity.replace(true) {
        let check_static_initializer = |name| {
            let static_initializer = ecx.eval_path(&["libc", name]);
            let init_field =
                static_initializer.offset(offset, ecx.machine.layouts.u32, ecx).unwrap();
            let init = ecx.read_scalar(&init_field).unwrap().to_u32().unwrap();
            assert_ne!(
                init, LAZY_INIT_COOKIE,
                "{name} is incompatible with our initialization cookie"
            );
        };

        check_static_initializer("PTHREAD_MUTEX_INITIALIZER");
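        // Check the OS-specific initializers, where they exist.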
        match &*ecx.tcx.sess.target.os {
            "linux" => {
                check_static_initializer("PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP");
                check_static_initializer("PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP");
                check_static_initializer("PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP");
            }
            "illumos" | "solaris" | "macos" | "freebsd" | "android" => {
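                // No non-standard initializers on these targets.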
            }
            os => throw_unsup_format!("`pthread_mutex` is not supported on {os}"),
        }
    }

    interp_ok(offset)
}

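/// Eagerly create and initialize a new mutex.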
fn mutex_create<'tcx>(
    ecx: &mut MiriInterpCx<'tcx>,
    mutex_ptr: &OpTy<'tcx>,
    kind: MutexKind,
) -> InterpResult<'tcx, PthreadMutex> {
    let mutex = ecx.deref_pointer_as(mutex_ptr, ecx.libc_ty_layout("pthread_mutex_t"))?;
    let data = PthreadMutex { mutex_ref: MutexRef::new(), kind };
    ecx.lazy_sync_init(&mutex, mutex_init_offset(ecx)?, data.clone())?;
    interp_ok(data)
}

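/// Returns the mutex data stored at the address that `mutex_ptr` points to.
/// Will raise an error if the mutex has been moved since its first use.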
fn mutex_get_data<'tcx, 'a>(
    ecx: &'a mut MiriInterpCx<'tcx>,
    mutex_ptr: &OpTy<'tcx>,
) -> InterpResult<'tcx, &'a PthreadMutex>
where
    'tcx: 'a,
{
    let mutex = ecx.deref_pointer_as(mutex_ptr, ecx.libc_ty_layout("pthread_mutex_t"))?;
    ecx.lazy_sync_get_data(
        &mutex,
        mutex_init_offset(ecx)?,
        || throw_ub_format!("`pthread_mutex_t` can't be moved after first use"),
        |ecx| {
            let kind = mutex_kind_from_static_initializer(ecx, &mutex)?;
            interp_ok(PthreadMutex { mutex_ref: MutexRef::new(), kind })
        },
    )
}

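/// Returns the kind of a static initializer.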
fn mutex_kind_from_static_initializer<'tcx>(
    ecx: &MiriInterpCx<'tcx>,
    mutex: &MPlaceTy<'tcx>,
) -> InterpResult<'tcx, MutexKind> {
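    // All the static initializers recognized here *must* be checked in `mutex_init_offset`!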
    let is_initializer =
        |name| bytewise_equal_atomic_relaxed(ecx, mutex, &ecx.eval_path(&["libc", name]));

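    // PTHREAD_MUTEX_INITIALIZER is recognized on all targets.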
    if is_initializer("PTHREAD_MUTEX_INITIALIZER")? {
        return interp_ok(MutexKind::Default);
    }
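    // Support additional platform-specific initializers.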
    match &*ecx.tcx.sess.target.os {
        "linux" =>
            if is_initializer("PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP")? {
                return interp_ok(MutexKind::Recursive);
            } else if is_initializer("PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP")? {
                return interp_ok(MutexKind::ErrorCheck);
            },
        _ => {}
    }
    throw_unsup_format!("unsupported static initializer used for `pthread_mutex_t`");
}

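// # pthread_rwlock_t
// We store some data directly inside the type, ignoring the platform layout:
// - init: u32 (the lazy-init cookie)
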
#[derive(Debug, Clone)]
struct PthreadRwLock {
    rwlock_ref: RwLockRef,
}

fn rwlock_init_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, Size> {
    let offset = match &*ecx.tcx.sess.target.os {
        "linux" | "illumos" | "solaris" | "freebsd" | "android" => 0,
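        // macOS stores a signature in the first bytes, so we have to move to offset 4.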
        "macos" => 4,
        os => throw_unsup_format!("`pthread_rwlock` is not supported on {os}"),
    };
    let offset = Size::from_bytes(offset);

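    // Sanity-check the static initializer (but only once per run): the `init` field
    // must start out *not* equal to LAZY_INIT_COOKIE.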
    if !ecx.machine.pthread_rwlock_sanity.replace(true) {
        let static_initializer = ecx.eval_path(&["libc", "PTHREAD_RWLOCK_INITIALIZER"]);
        let init_field = static_initializer.offset(offset, ecx.machine.layouts.u32, ecx).unwrap();
        let init = ecx.read_scalar(&init_field).unwrap().to_u32().unwrap();
        assert_ne!(
            init, LAZY_INIT_COOKIE,
            "PTHREAD_RWLOCK_INITIALIZER is incompatible with our initialization cookie"
        );
    }

    interp_ok(offset)
}

fn rwlock_get_data<'tcx, 'a>(
    ecx: &'a mut MiriInterpCx<'tcx>,
    rwlock_ptr: &OpTy<'tcx>,
) -> InterpResult<'tcx, &'a PthreadRwLock>
where
    'tcx: 'a,
{
    let rwlock = ecx.deref_pointer_as(rwlock_ptr, ecx.libc_ty_layout("pthread_rwlock_t"))?;
    ecx.lazy_sync_get_data(
        &rwlock,
        rwlock_init_offset(ecx)?,
        || throw_ub_format!("`pthread_rwlock_t` can't be moved after first use"),
        |ecx| {
            if !bytewise_equal_atomic_relaxed(
                ecx,
                &rwlock,
                &ecx.eval_path(&["libc", "PTHREAD_RWLOCK_INITIALIZER"]),
            )? {
                throw_unsup_format!("unsupported static initializer used for `pthread_rwlock_t`");
            }
            interp_ok(PthreadRwLock { rwlock_ref: RwLockRef::new() })
        },
    )
}

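// # pthread_condattr_t
// We store some data directly inside the type, ignoring the platform layout:
// - clock: i32
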
#[inline]
fn condattr_clock_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, u64> {
    interp_ok(match &*ecx.tcx.sess.target.os {
        "linux" | "illumos" | "solaris" | "freebsd" | "android" => 0,
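        // macOS does not have a clock attribute.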
        os => throw_unsup_format!("`pthread_condattr` clock field is not supported on {os}"),
    })
}

fn condattr_get_clock_id<'tcx>(
    ecx: &MiriInterpCx<'tcx>,
    attr_ptr: &OpTy<'tcx>,
) -> InterpResult<'tcx, i32> {
    ecx.deref_pointer_and_read(
        attr_ptr,
        condattr_clock_offset(ecx)?,
        ecx.libc_ty_layout("pthread_condattr_t"),
        ecx.machine.layouts.i32,
    )?
    .to_i32()
}

fn condattr_set_clock_id<'tcx>(
    ecx: &mut MiriInterpCx<'tcx>,
    attr_ptr: &OpTy<'tcx>,
    clock_id: i32,
) -> InterpResult<'tcx, ()> {
    ecx.deref_pointer_and_write(
        attr_ptr,
        condattr_clock_offset(ecx)?,
        Scalar::from_i32(clock_id),
        ecx.libc_ty_layout("pthread_condattr_t"),
        ecx.machine.layouts.i32,
    )
}

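/// Translates the clock from what is stored in pthread_condattr_t to our `ClockId` enum.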
fn condattr_translate_clock_id<'tcx>(
    ecx: &MiriInterpCx<'tcx>,
    raw_id: i32,
) -> InterpResult<'tcx, ClockId> {
    interp_ok(if raw_id == ecx.eval_libc_i32("CLOCK_REALTIME") {
        ClockId::Realtime
    } else if raw_id == ecx.eval_libc_i32("CLOCK_MONOTONIC") {
        ClockId::Monotonic
    } else {
        throw_unsup_format!("unsupported clock id: {raw_id}");
    })
}

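// # pthread_cond_t
// We store some data directly inside the type, ignoring the platform layout:
// - init: u32 (the lazy-init cookie)
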
fn cond_init_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, Size> {
    let offset = match &*ecx.tcx.sess.target.os {
        "linux" | "illumos" | "solaris" | "freebsd" | "android" => 0,
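        // macOS stores a signature in the first bytes, so we have to move to offset 4.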
        "macos" => 4,
        os => throw_unsup_format!("`pthread_cond` is not supported on {os}"),
    };
    let offset = Size::from_bytes(offset);

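    // Sanity-check the static initializer (but only once per run): the `init` field
    // must start out *not* equal to LAZY_INIT_COOKIE.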
    if !ecx.machine.pthread_condvar_sanity.replace(true) {
        let static_initializer = ecx.eval_path(&["libc", "PTHREAD_COND_INITIALIZER"]);
        let init_field = static_initializer.offset(offset, ecx.machine.layouts.u32, ecx).unwrap();
        let init = ecx.read_scalar(&init_field).unwrap().to_u32().unwrap();
        assert_ne!(
            init, LAZY_INIT_COOKIE,
            "PTHREAD_COND_INITIALIZER is incompatible with our initialization cookie"
        );
    }

    interp_ok(offset)
}

#[derive(Debug, Clone, Copy)]
enum ClockId {
    Realtime,
    Monotonic,
}

#[derive(Debug, Clone)]
struct PthreadCondvar {
    condvar_ref: CondvarRef,
    clock: ClockId,
}

fn cond_create<'tcx>(
    ecx: &mut MiriInterpCx<'tcx>,
    cond_ptr: &OpTy<'tcx>,
    clock: ClockId,
) -> InterpResult<'tcx, PthreadCondvar> {
    let cond = ecx.deref_pointer_as(cond_ptr, ecx.libc_ty_layout("pthread_cond_t"))?;
    let data = PthreadCondvar { condvar_ref: CondvarRef::new(), clock };
    ecx.lazy_sync_init(&cond, cond_init_offset(ecx)?, data.clone())?;
    interp_ok(data)
}

fn cond_get_data<'tcx, 'a>(
    ecx: &'a mut MiriInterpCx<'tcx>,
    cond_ptr: &OpTy<'tcx>,
) -> InterpResult<'tcx, &'a PthreadCondvar>
where
    'tcx: 'a,
{
    let cond = ecx.deref_pointer_as(cond_ptr, ecx.libc_ty_layout("pthread_cond_t"))?;
    ecx.lazy_sync_get_data(
        &cond,
        cond_init_offset(ecx)?,
        || throw_ub_format!("`pthread_cond_t` can't be moved after first use"),
        |ecx| {
            if !bytewise_equal_atomic_relaxed(
                ecx,
                &cond,
                &ecx.eval_path(&["libc", "PTHREAD_COND_INITIALIZER"]),
            )? {
                throw_unsup_format!("unsupported static initializer used for `pthread_cond_t`");
            }
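            // This used the static initializer. The clock there is always CLOCK_REALTIME.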
            interp_ok(PthreadCondvar { condvar_ref: CondvarRef::new(), clock: ClockId::Realtime })
        },
    )
}

impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
    fn pthread_mutexattr_init(&mut self, attr_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        mutexattr_set_kind(this, attr_op, PTHREAD_MUTEX_KIND_UNCHANGED)?;

        interp_ok(())
    }

    fn pthread_mutexattr_settype(
        &mut self,
        attr_op: &OpTy<'tcx>,
        kind_op: &OpTy<'tcx>,
    ) -> InterpResult<'tcx, Scalar> {
        let this = self.eval_context_mut();

        let kind = this.read_scalar(kind_op)?.to_i32()?;
        if kind == this.eval_libc_i32("PTHREAD_MUTEX_NORMAL")
            || kind == this.eval_libc_i32("PTHREAD_MUTEX_DEFAULT")
            || kind == this.eval_libc_i32("PTHREAD_MUTEX_ERRORCHECK")
            || kind == this.eval_libc_i32("PTHREAD_MUTEX_RECURSIVE")
        {
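            // Make sure we do not mix this up with the "unchanged" sentinel.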
            assert_ne!(kind, PTHREAD_MUTEX_KIND_UNCHANGED);
            mutexattr_set_kind(this, attr_op, kind)?;
        } else {
            let einval = this.eval_libc_i32("EINVAL");
            return interp_ok(Scalar::from_i32(einval));
        }

        interp_ok(Scalar::from_i32(0))
    }

    fn pthread_mutexattr_destroy(&mut self, attr_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

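        // Destroying an uninit pthread_mutexattr is UB, so check to make sure it's not uninit.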
        mutexattr_get_kind(this, attr_op)?;

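        // To catch double-destroys, we de-initialize the mutexattr.
        // This is technically not right and might lead to false positives. For example, the below
        // code is *likely* sound, even assuming uninit numbers are UB, but Miri complains.
        //
        //     let mut x: MaybeUninit<libc::pthread_mutexattr_t> = MaybeUninit::zeroed();
        //     libc::pthread_mutexattr_init(x.as_mut_ptr());
        //     libc::pthread_mutexattr_destroy(x.as_mut_ptr());
        //     x.assume_init();
        //
        // However, the way libstd uses the pthread APIs works in our favor here, so we can get away
        // with this. This can always be revisited to have some external state to catch
        // double-destroys without complaining about the above code.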
        this.write_uninit(
            &this.deref_pointer_as(attr_op, this.libc_ty_layout("pthread_mutexattr_t"))?,
        )?;

        interp_ok(())
    }

    fn pthread_mutex_init(
        &mut self,
        mutex_op: &OpTy<'tcx>,
        attr_op: &OpTy<'tcx>,
    ) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        let attr = this.read_pointer(attr_op)?;
        let kind = if this.ptr_is_null(attr)? {
            MutexKind::Default
        } else {
            mutexattr_translate_kind(this, mutexattr_get_kind(this, attr_op)?)?
        };

        mutex_create(this, mutex_op, kind)?;

        interp_ok(())
    }

    fn pthread_mutex_lock(
        &mut self,
        mutex_op: &OpTy<'tcx>,
        dest: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let mutex = mutex_get_data(this, mutex_op)?.clone();

        let ret = if let Some(owner_thread) = mutex.mutex_ref.owner() {
            if owner_thread != this.active_thread() {
                this.mutex_enqueue_and_block(
                    mutex.mutex_ref,
                    Some((Scalar::from_i32(0), dest.clone())),
                );
                return interp_ok(());
            } else {
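                // Trying to acquire the same mutex again.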
                match mutex.kind {
                    MutexKind::Default =>
                        throw_ub_format!(
                            "trying to acquire default mutex already locked by the current thread"
                        ),
                    MutexKind::Normal => throw_machine_stop!(TerminationInfo::Deadlock),
                    MutexKind::ErrorCheck => this.eval_libc_i32("EDEADLK"),
                    MutexKind::Recursive => {
                        this.mutex_lock(&mutex.mutex_ref);
                        0
                    }
                }
            }
        } else {
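            // The mutex is unlocked. Let's lock it.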
            this.mutex_lock(&mutex.mutex_ref);
            0
        };
        this.write_scalar(Scalar::from_i32(ret), dest)?;
        interp_ok(())
    }

    fn pthread_mutex_trylock(&mut self, mutex_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar> {
        let this = self.eval_context_mut();

        let mutex = mutex_get_data(this, mutex_op)?.clone();

        interp_ok(Scalar::from_i32(if let Some(owner_thread) = mutex.mutex_ref.owner() {
            if owner_thread != this.active_thread() {
                this.eval_libc_i32("EBUSY")
            } else {
                match mutex.kind {
                    MutexKind::Default | MutexKind::Normal | MutexKind::ErrorCheck =>
                        this.eval_libc_i32("EBUSY"),
                    MutexKind::Recursive => {
                        this.mutex_lock(&mutex.mutex_ref);
                        0
                    }
                }
            }
        } else {
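            // The mutex is unlocked. Let's lock it.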
            this.mutex_lock(&mutex.mutex_ref);
            0
        }))
    }

    fn pthread_mutex_unlock(&mut self, mutex_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar> {
        let this = self.eval_context_mut();

        let mutex = mutex_get_data(this, mutex_op)?.clone();

        if let Some(_old_locked_count) = this.mutex_unlock(&mutex.mutex_ref)? {
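            // The mutex was locked by the current thread.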
            interp_ok(Scalar::from_i32(0))
        } else {
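            // The mutex was locked by another thread or not locked at all. POSIX leaves the
            // behavior either undefined or an error, depending on the mutex kind (see the
            // "Unlock When Not Owner" column in the pthread_mutex_lock specification).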
            match mutex.kind {
                MutexKind::Default =>
                    throw_ub_format!(
                        "unlocked a default mutex that was not locked by the current thread"
                    ),
                MutexKind::Normal =>
                    throw_ub_format!(
                        "unlocked a PTHREAD_MUTEX_NORMAL mutex that was not locked by the current thread"
                    ),
                MutexKind::ErrorCheck | MutexKind::Recursive =>
                    interp_ok(Scalar::from_i32(this.eval_libc_i32("EPERM"))),
            }
        }
    }

    fn pthread_mutex_destroy(&mut self, mutex_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

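        // Reading the field also has the side-effect that we detect double-`destroy`
        // since we make the field uninit below.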
        let mutex = mutex_get_data(this, mutex_op)?.clone();

        if mutex.mutex_ref.owner().is_some() {
            throw_ub_format!("destroyed a locked mutex");
        }

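        // This might lead to false positives, see comment in pthread_mutexattr_destroy.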
        this.write_uninit(
            &this.deref_pointer_as(mutex_op, this.libc_ty_layout("pthread_mutex_t"))?,
        )?;
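        // FIXME: delete interpreter state associated with this mutex.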
        interp_ok(())
    }

    fn pthread_rwlock_rdlock(
        &mut self,
        rwlock_op: &OpTy<'tcx>,
        dest: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let rwlock = rwlock_get_data(this, rwlock_op)?.clone();

        if rwlock.rwlock_ref.is_write_locked() {
            this.rwlock_enqueue_and_block_reader(
                rwlock.rwlock_ref,
                Scalar::from_i32(0),
                dest.clone(),
            );
        } else {
            this.rwlock_reader_lock(&rwlock.rwlock_ref);
            this.write_null(dest)?;
        }

        interp_ok(())
    }

    fn pthread_rwlock_tryrdlock(&mut self, rwlock_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar> {
        let this = self.eval_context_mut();

        let rwlock = rwlock_get_data(this, rwlock_op)?.clone();

        if rwlock.rwlock_ref.is_write_locked() {
            interp_ok(Scalar::from_i32(this.eval_libc_i32("EBUSY")))
        } else {
            this.rwlock_reader_lock(&rwlock.rwlock_ref);
            interp_ok(Scalar::from_i32(0))
        }
    }

    fn pthread_rwlock_wrlock(
        &mut self,
        rwlock_op: &OpTy<'tcx>,
        dest: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let rwlock = rwlock_get_data(this, rwlock_op)?.clone();

        if rwlock.rwlock_ref.is_locked() {
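            // Note: this will deadlock if the lock is already held by the current
            // thread in any way.
            //
            // Relevant documentation:
            // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_rwlock_wrlock.html
            //
            // POSIX says the results are undefined if the calling thread already
            // holds the lock, and in practice the major implementations deadlock,
            // so blocking forever matches real behavior here.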
            this.rwlock_enqueue_and_block_writer(
                rwlock.rwlock_ref,
                Scalar::from_i32(0),
                dest.clone(),
            );
        } else {
            this.rwlock_writer_lock(&rwlock.rwlock_ref);
            this.write_null(dest)?;
        }

        interp_ok(())
    }

    fn pthread_rwlock_trywrlock(&mut self, rwlock_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar> {
        let this = self.eval_context_mut();

        let rwlock = rwlock_get_data(this, rwlock_op)?.clone();

        if rwlock.rwlock_ref.is_locked() {
            interp_ok(Scalar::from_i32(this.eval_libc_i32("EBUSY")))
        } else {
            this.rwlock_writer_lock(&rwlock.rwlock_ref);
            interp_ok(Scalar::from_i32(0))
        }
    }

    fn pthread_rwlock_unlock(&mut self, rwlock_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        let rwlock = rwlock_get_data(this, rwlock_op)?.clone();

        if this.rwlock_reader_unlock(&rwlock.rwlock_ref)?
            || this.rwlock_writer_unlock(&rwlock.rwlock_ref)?
        {
            interp_ok(())
        } else {
            throw_ub_format!("unlocked an rwlock that was not locked by the active thread");
        }
    }

    fn pthread_rwlock_destroy(&mut self, rwlock_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

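        // Reading the field also has the side-effect that we detect double-`destroy`
        // since we make the field uninit below.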
        let rwlock = rwlock_get_data(this, rwlock_op)?.clone();

        if rwlock.rwlock_ref.is_locked() {
            throw_ub_format!("destroyed a locked rwlock");
        }

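        // This might lead to false positives, see comment in pthread_mutexattr_destroy.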
        this.write_uninit(
            &this.deref_pointer_as(rwlock_op, this.libc_ty_layout("pthread_rwlock_t"))?,
        )?;
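        // FIXME: delete interpreter state associated with this rwlock.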
        interp_ok(())
    }

    fn pthread_condattr_init(&mut self, attr_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

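        // There is no clock attribute on macOS.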
        if this.tcx.sess.target.os != "macos" {
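            // The default value of the clock attribute shall refer to the system clock.
            // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_condattr_setclock.html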
            let default_clock_id = this.eval_libc_i32("CLOCK_REALTIME");
            condattr_set_clock_id(this, attr_op, default_clock_id)?;
        }

        interp_ok(())
    }

    fn pthread_condattr_setclock(
        &mut self,
        attr_op: &OpTy<'tcx>,
        clock_id_op: &OpTy<'tcx>,
    ) -> InterpResult<'tcx, Scalar> {
        let this = self.eval_context_mut();

        let clock_id = this.read_scalar(clock_id_op)?.to_i32()?;
        if clock_id == this.eval_libc_i32("CLOCK_REALTIME")
            || clock_id == this.eval_libc_i32("CLOCK_MONOTONIC")
        {
            condattr_set_clock_id(this, attr_op, clock_id)?;
        } else {
            let einval = this.eval_libc_i32("EINVAL");
            return interp_ok(Scalar::from_i32(einval));
        }

        interp_ok(Scalar::from_i32(0))
    }

    fn pthread_condattr_getclock(
        &mut self,
        attr_op: &OpTy<'tcx>,
        clk_id_op: &OpTy<'tcx>,
    ) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        let clock_id = condattr_get_clock_id(this, attr_op)?;
        this.write_scalar(
            Scalar::from_i32(clock_id),
            &this.deref_pointer_as(clk_id_op, this.libc_ty_layout("clockid_t"))?,
        )?;

        interp_ok(())
    }

    fn pthread_condattr_destroy(&mut self, attr_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

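        // Destroying an uninit pthread_condattr is UB, so check to make sure it's not uninit.
        // There is no clock attribute on macOS, so there is nothing to read there.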
        if this.tcx.sess.target.os != "macos" {
            condattr_get_clock_id(this, attr_op)?;
        }

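        // De-initialize the attribute to catch double-destroys; see the comment in
        // pthread_mutexattr_destroy for the caveats of this approach.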
        this.write_uninit(
            &this.deref_pointer_as(attr_op, this.libc_ty_layout("pthread_condattr_t"))?,
        )?;

        interp_ok(())
    }

    fn pthread_cond_init(
        &mut self,
        cond_op: &OpTy<'tcx>,
        attr_op: &OpTy<'tcx>,
    ) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

        let attr = this.read_pointer(attr_op)?;
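        // Default clock if `attr` is null, and on macOS where there is no clock attribute.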
        let clock_id = if this.ptr_is_null(attr)? || this.tcx.sess.target.os == "macos" {
            this.eval_libc_i32("CLOCK_REALTIME")
        } else {
            condattr_get_clock_id(this, attr_op)?
        };
        let clock_id = condattr_translate_clock_id(this, clock_id)?;

        cond_create(this, cond_op, clock_id)?;

        interp_ok(())
    }

    fn pthread_cond_signal(&mut self, cond_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();
        let condvar = cond_get_data(this, cond_op)?.condvar_ref.clone();
        this.condvar_signal(&condvar)?;
        interp_ok(())
    }

    fn pthread_cond_broadcast(&mut self, cond_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();
        let condvar = cond_get_data(this, cond_op)?.condvar_ref.clone();
        while this.condvar_signal(&condvar)? {}
        interp_ok(())
    }

    fn pthread_cond_wait(
        &mut self,
        cond_op: &OpTy<'tcx>,
        mutex_op: &OpTy<'tcx>,
        dest: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let data = cond_get_data(this, cond_op)?.clone();
        let mutex_ref = mutex_get_data(this, mutex_op)?.mutex_ref.clone();

        this.condvar_wait(
            data.condvar_ref,
            mutex_ref,
            None, // no timeout
            Scalar::from_i32(0), // retval on success
            Scalar::from_i32(0), // retval on timeout -- unused, there is no timeout
            dest.clone(),
        )?;

        interp_ok(())
    }

    fn pthread_cond_timedwait(
        &mut self,
        cond_op: &OpTy<'tcx>,
        mutex_op: &OpTy<'tcx>,
        abstime_op: &OpTy<'tcx>,
        dest: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let data = cond_get_data(this, cond_op)?.clone();
        let mutex_ref = mutex_get_data(this, mutex_op)?.mutex_ref.clone();

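        // Extract the timeout.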
        let duration = match this
            .read_timespec(&this.deref_pointer_as(abstime_op, this.libc_ty_layout("timespec"))?)?
        {
            Some(duration) => duration,
            None => {
                let einval = this.eval_libc("EINVAL");
                this.write_scalar(einval, dest)?;
                return interp_ok(());
            }
        };
        let timeout_clock = match data.clock {
            ClockId::Realtime => {
                this.check_no_isolation("`pthread_cond_timedwait` with `CLOCK_REALTIME`")?;
                TimeoutClock::RealTime
            }
            ClockId::Monotonic => TimeoutClock::Monotonic,
        };

        this.condvar_wait(
            data.condvar_ref,
            mutex_ref,
            Some((timeout_clock, TimeoutAnchor::Absolute, duration)),
            Scalar::from_i32(0), // retval on success
            this.eval_libc("ETIMEDOUT"), // retval on timeout
            dest.clone(),
        )?;

        interp_ok(())
    }

    fn pthread_cond_destroy(&mut self, cond_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();

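        // Reading the field also has the side-effect that we detect double-`destroy`
        // since we make the field uninit below.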
        let condvar = &cond_get_data(this, cond_op)?.condvar_ref;
        if condvar.is_awaited() {
            throw_ub_format!("destroying an awaited condition variable");
        }

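        // This might lead to false positives, see comment in pthread_mutexattr_destroy.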
        this.write_uninit(&this.deref_pointer_as(cond_op, this.libc_ty_layout("pthread_cond_t"))?)?;
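        // FIXME: delete interpreter state associated with this condvar.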
        interp_ok(())
    }
}
909}