use std::cell::Cell;
use std::collections::hash_map::DefaultHasher;
use std::hash::Hasher;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, Mutex, Once};
use std::{fmt, io, mem, ptr, thread};

use crossbeam_deque::{Injector, Steal, Stealer, Worker};

use crate::job::{JobFifo, JobRef, StackJob};
use crate::latch::{AsCoreLatch, CoreLatch, Latch, LatchRef, LockLatch, OnceLatch, SpinLatch};
use crate::sleep::Sleep;
use crate::tlv::Tlv;
use crate::{
    AcquireThreadHandler, DeadlockHandler, ErrorKind, ExitHandler, PanicHandler,
    ReleaseThreadHandler, StartHandler, ThreadPoolBuildError, ThreadPoolBuilder, Yield, unwind,
};

/// Thread builder used for customization via `ThreadPoolBuilder::spawn_handler`.
pub struct ThreadBuilder {
    name: Option<String>,
    stack_size: Option<usize>,
    worker: Worker<JobRef>,
    stealer: Stealer<JobRef>,
    registry: Arc<Registry>,
    index: usize,
}

impl ThreadBuilder {
    /// Gets the index of this thread in the pool, within `0..num_threads`.
    pub fn index(&self) -> usize {
        self.index
    }

    /// Gets the string that was specified by `ThreadPoolBuilder::thread_name()`, if any.
    pub fn name(&self) -> Option<&str> {
        self.name.as_deref()
    }

    /// Gets the value that was specified by `ThreadPoolBuilder::stack_size()`, if any.
    pub fn stack_size(&self) -> Option<usize> {
        self.stack_size
    }

    /// Executes the main loop for this thread. This will not return until the
    /// thread pool is dropped.
    pub fn run(self) {
        unsafe { main_loop(self) }
    }
}

impl fmt::Debug for ThreadBuilder {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("ThreadBuilder")
            .field("pool", &self.registry.id())
            .field("index", &self.index)
            .field("name", &self.name)
            .field("stack_size", &self.stack_size)
            .finish()
    }
}

/// Generalized trait for spawning a thread in the `Registry`.
pub trait ThreadSpawn {
    private_decl! {}

    /// Spawn a thread with the `ThreadBuilder` parameter, and then
    /// call `ThreadBuilder::run()`.
    fn spawn(&mut self, thread: ThreadBuilder) -> io::Result<()>;
}

/// Spawns a thread in the "normal" way with `std::thread::Builder`.
#[derive(Debug, Default)]
pub struct DefaultSpawn;

impl ThreadSpawn for DefaultSpawn {
    private_impl! {}

    fn spawn(&mut self, thread: ThreadBuilder) -> io::Result<()> {
        let mut b = thread::Builder::new();
        if let Some(name) = thread.name() {
            b = b.name(name.to_owned());
        }
        if let Some(stack_size) = thread.stack_size() {
            b = b.stack_size(stack_size);
        }
        b.spawn(|| thread.run())?;
        Ok(())
    }
}
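
// The default spawn handler above just forwards the configured thread name and
// stack size to `std::thread::Builder` and detaches the spawned thread: the
// returned `JoinHandle` is dropped, since shutdown is coordinated through the
// per-thread `stopped` latches rather than by joining.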

/// Spawns a thread with a user-supplied callback to create and run the thread.
#[derive(Debug)]
pub struct CustomSpawn<F>(F);

impl<F> CustomSpawn<F>
where
    F: FnMut(ThreadBuilder) -> io::Result<()>,
{
    pub(super) fn new(spawn: F) -> Self {
        CustomSpawn(spawn)
    }
}

impl<F> ThreadSpawn for CustomSpawn<F>
where
    F: FnMut(ThreadBuilder) -> io::Result<()>,
{
    private_impl! {}

    #[inline]
    fn spawn(&mut self, thread: ThreadBuilder) -> io::Result<()> {
        (self.0)(thread)
    }
}
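
// A minimal sketch of how a custom spawn handler is wired up from the public
// side (assuming the `ThreadPoolBuilder::spawn_handler` and `build` methods
// exposed by this crate); the closure receives a `ThreadBuilder` and must
// arrange for `run()` to be called on it exactly once:
//
//     let pool = ThreadPoolBuilder::new()
//         .spawn_handler(|thread| {
//             std::thread::Builder::new().spawn(|| thread.run())?;
//             Ok(())
//         })
//         .build()?;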

pub struct Registry {
    thread_infos: Vec<ThreadInfo>,
    sleep: Sleep,
    injected_jobs: Injector<JobRef>,
    broadcasts: Mutex<Vec<Worker<JobRef>>>,
    panic_handler: Option<Box<PanicHandler>>,
    pub(crate) deadlock_handler: Option<Box<DeadlockHandler>>,
    start_handler: Option<Box<StartHandler>>,
    exit_handler: Option<Box<ExitHandler>>,
    pub(crate) acquire_thread_handler: Option<Box<AcquireThreadHandler>>,
    pub(crate) release_thread_handler: Option<Box<ReleaseThreadHandler>>,

    /// Counts the active "users" of the registry (including the registry
    /// itself, which holds one count from creation). When it drops to zero,
    /// `terminate()` sets every worker's terminate latch so the pool shuts down.
    terminate_count: AtomicUsize,
}

static mut THE_REGISTRY: Option<Arc<Registry>> = None;
static THE_REGISTRY_SET: Once = Once::new();

pub(super) fn global_registry() -> &'static Arc<Registry> {
    set_global_registry(default_global_registry)
        .or_else(|err| {
            // The global registry was already created (possibly by another
            // thread); use it, otherwise propagate the build error.
            debug_assert!(THE_REGISTRY_SET.is_completed());
            let the_registry = unsafe { &*ptr::addr_of!(THE_REGISTRY) };
            the_registry.as_ref().ok_or(err)
        })
        .expect("The global thread pool has not been initialized.")
}

/// Starts the worker threads (if that has not already happened) with the
/// given builder.
pub(super) fn init_global_registry<S>(
    builder: ThreadPoolBuilder<S>,
) -> Result<&'static Arc<Registry>, ThreadPoolBuildError>
where
    S: ThreadSpawn,
{
    set_global_registry(|| Registry::new(builder))
}

/// Starts the worker threads (if that has not already happened) by creating a
/// registry with the given callback.
fn set_global_registry<F>(registry: F) -> Result<&'static Arc<Registry>, ThreadPoolBuildError>
where
    F: FnOnce() -> Result<Arc<Registry>, ThreadPoolBuildError>,
{
    let mut result = Err(ThreadPoolBuildError::new(ErrorKind::GlobalPoolAlreadyInitialized));

    THE_REGISTRY_SET.call_once(|| {
        result = registry().map(|registry: Arc<Registry>| {
            // SAFETY: this is the only place `THE_REGISTRY` is written, and
            // `Once` guarantees it runs at most once.
            unsafe {
                ptr::addr_of_mut!(THE_REGISTRY).write(Some(registry));
                (*ptr::addr_of!(THE_REGISTRY)).as_ref().unwrap_unchecked()
            }
        })
    });

    result
}
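
// Note on the `Once` dance above: `call_once` guarantees that at most one
// caller ever builds and stores the global registry. Every other caller keeps
// the pre-seeded `GlobalPoolAlreadyInitialized` error in `result`, and
// `global_registry()` then falls back to reading the already-installed `Arc`.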

fn default_global_registry() -> Result<Arc<Registry>, ThreadPoolBuildError> {
    let result = Registry::new(ThreadPoolBuilder::new());

    // If we failed because the platform cannot spawn threads, fall back to a
    // pool that uses the current thread as its only "worker".
    let unsupported = matches!(&result, Err(e) if e.is_unsupported());
    if unsupported && WorkerThread::current().is_null() {
        let builder = ThreadPoolBuilder::new().num_threads(1).spawn_handler(|thread| {
            // Rather than starting a new thread, we take over the current
            // thread *without* running the main loop, so we can still return.
            let worker_thread = Box::leak(Box::new(WorkerThread::from(thread)));
            let registry = &*worker_thread.registry;
            let index = worker_thread.index;

            unsafe {
                WorkerThread::set_current(worker_thread);

                // let the registry know we are ready to do work
                Latch::set(&registry.thread_infos[index].primed);
            }

            Ok(())
        });

        let fallback_result = Registry::new(builder);
        if fallback_result.is_ok() {
            return fallback_result;
        }
    }

    result
}

struct Terminator<'a>(&'a Arc<Registry>);

impl<'a> Drop for Terminator<'a> {
    fn drop(&mut self) {
        self.0.terminate()
    }
}
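
// `Terminator` is a drop guard used while building a registry: if spawning any
// worker thread fails partway through `Registry::new`, dropping the guard
// calls `terminate()` so the threads that did start are told to shut down. On
// success the guard is defused with `mem::forget`.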

impl Registry {
    pub(super) fn new<S>(
        mut builder: ThreadPoolBuilder<S>,
    ) -> Result<Arc<Self>, ThreadPoolBuildError>
    where
        S: ThreadSpawn,
    {
        let n_threads = Ord::min(builder.get_num_threads(), crate::max_num_threads());

        let breadth_first = builder.get_breadth_first();

        let (workers, stealers): (Vec<_>, Vec<_>) = (0..n_threads)
            .map(|_| {
                let worker = if breadth_first { Worker::new_fifo() } else { Worker::new_lifo() };

                let stealer = worker.stealer();
                (worker, stealer)
            })
            .unzip();

        let (broadcasts, broadcast_stealers): (Vec<_>, Vec<_>) = (0..n_threads)
            .map(|_| {
                let worker = Worker::new_fifo();
                let stealer = worker.stealer();
                (worker, stealer)
            })
            .unzip();

        let registry = Arc::new(Registry {
            thread_infos: stealers.into_iter().map(ThreadInfo::new).collect(),
            sleep: Sleep::new(n_threads),
            injected_jobs: Injector::new(),
            broadcasts: Mutex::new(broadcasts),
            terminate_count: AtomicUsize::new(1),
            panic_handler: builder.take_panic_handler(),
            deadlock_handler: builder.take_deadlock_handler(),
            start_handler: builder.take_start_handler(),
            exit_handler: builder.take_exit_handler(),
            acquire_thread_handler: builder.take_acquire_thread_handler(),
            release_thread_handler: builder.take_release_thread_handler(),
        });

        // If we return early or panic, make sure to terminate the threads we started.
        let t1000 = Terminator(&registry);

        for (index, (worker, stealer)) in workers.into_iter().zip(broadcast_stealers).enumerate() {
            let thread = ThreadBuilder {
                name: builder.get_thread_name(index),
                stack_size: builder.get_stack_size(),
                registry: Arc::clone(&registry),
                worker,
                stealer,
                index,
            };
            if let Err(e) = builder.get_spawn_handler().spawn(thread) {
                return Err(ThreadPoolBuildError::new(ErrorKind::IOError(e)));
            }
        }

        // Everything started successfully, so defuse the guard.
        mem::forget(t1000);

        Ok(registry)
    }

    pub fn current() -> Arc<Registry> {
        unsafe {
            let worker_thread = WorkerThread::current();
            let registry = if worker_thread.is_null() {
                global_registry()
            } else {
                &(*worker_thread).registry
            };
            Arc::clone(registry)
        }
    }

    /// Returns the number of threads in the current registry. This is better
    /// than `Registry::current().num_threads()` because it avoids incrementing
    /// the `Arc`.
    pub(super) fn current_num_threads() -> usize {
        unsafe {
            let worker_thread = WorkerThread::current();
            if worker_thread.is_null() {
                global_registry().num_threads()
            } else {
                (*worker_thread).registry.num_threads()
            }
        }
    }

    /// Returns the current `WorkerThread` if it's part of this `Registry`.
    pub(super) fn current_thread(&self) -> Option<&WorkerThread> {
        unsafe {
            let worker = WorkerThread::current().as_ref()?;
            if worker.registry().id() == self.id() { Some(worker) } else { None }
        }
    }

    /// Returns an opaque identifier for this registry.
    pub(super) fn id(&self) -> RegistryId {
        // We can rely on `self` not to change since registries are only ever
        // created boxed up in an `Arc`.
        RegistryId { addr: self as *const Self as usize }
    }

    pub(super) fn num_threads(&self) -> usize {
        self.thread_infos.len()
    }

    pub(super) fn catch_unwind(&self, f: impl FnOnce()) {
        if let Err(err) = unwind::halt_unwinding(f) {
            // If there is no handler, or if the handler itself panics, abort.
            let abort_guard = unwind::AbortIfPanic;
            if let Some(ref handler) = self.panic_handler {
                handler(err);
                mem::forget(abort_guard);
            }
        }
    }

    /// Waits for the worker threads to get up and running. This is
    /// meant to be used primarily for benchmarking, so that you can
    /// get more consistent numbers by having everything "ready to go".
    pub(super) fn wait_until_primed(&self) {
        for info in &self.thread_infos {
            info.primed.wait();
        }
    }

    /// Waits for the worker threads to stop. This is used for testing,
    /// so we can check that termination actually works.
    pub(super) fn wait_until_stopped(&self) {
        self.release_thread();
        for info in &self.thread_infos {
            info.stopped.wait();
        }
        self.acquire_thread();
    }

    pub(crate) fn acquire_thread(&self) {
        if let Some(ref acquire_thread_handler) = self.acquire_thread_handler {
            acquire_thread_handler();
        }
    }

    pub(crate) fn release_thread(&self) {
        if let Some(ref release_thread_handler) = self.release_thread_handler {
            release_thread_handler();
        }
    }

    /// Push a job into the given `registry`. If we are running on a
    /// worker thread for the registry, this will push onto the
    /// deque; else, it will inject from the outside (which is slower).
    pub(super) fn inject_or_push(&self, job_ref: JobRef) {
        let worker_thread = WorkerThread::current();
        unsafe {
            if !worker_thread.is_null() && (*worker_thread).registry().id() == self.id() {
                (*worker_thread).push(job_ref);
            } else {
                self.inject(job_ref);
            }
        }
    }

    /// Push a job into the "external jobs" queue; it will be taken by
    /// whatever worker has nothing to do.
    pub(super) fn inject(&self, injected_job: JobRef) {
        // A terminated registry should never be receiving new jobs.
        debug_assert_ne!(
            self.terminate_count.load(Ordering::Acquire),
            0,
            "inject() sees state.terminate as true"
        );

        let queue_was_empty = self.injected_jobs.is_empty();

        self.injected_jobs.push(injected_job);
        self.sleep.new_injected_jobs(1, queue_was_empty);
    }
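
    // The `queue_was_empty` flag passed to the sleep module above lets it
    // decide whether a sleeping worker actually needs to be woken: if the
    // injector already held pending jobs, the workers that are awake will get
    // to the new job anyway.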

    pub(crate) fn has_injected_job(&self) -> bool {
        !self.injected_jobs.is_empty()
    }

    fn pop_injected_job(&self) -> Option<JobRef> {
        loop {
            match self.injected_jobs.steal() {
                Steal::Success(job) => return Some(job),
                Steal::Empty => return None,
                Steal::Retry => {}
            }
        }
    }

    /// Push a job into each thread's own "external jobs" queue; it will be
    /// executed only on that thread, when it has nothing else to do locally,
    /// before it tries to steal other work.
    ///
    /// **Panics** if not given exactly as many jobs as there are threads.
    pub(super) fn inject_broadcast(&self, injected_jobs: impl ExactSizeIterator<Item = JobRef>) {
        assert_eq!(self.num_threads(), injected_jobs.len());
        {
            let broadcasts = self.broadcasts.lock().unwrap();

            // A terminated registry should never be receiving new jobs.
            debug_assert_ne!(
                self.terminate_count.load(Ordering::Acquire),
                0,
                "inject_broadcast() sees state.terminate as true"
            );

            assert_eq!(broadcasts.len(), injected_jobs.len());
            for (worker, job_ref) in broadcasts.iter().zip(injected_jobs) {
                worker.push(job_ref);
            }
        }
        for i in 0..self.num_threads() {
            self.sleep.notify_worker_latch_is_set(i);
        }
    }
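
    // Note the asymmetry with `inject`: broadcast jobs go into one dedicated
    // FIFO per worker (the `broadcasts` queues built in `Registry::new`), so
    // every thread is explicitly woken via `notify_worker_latch_is_set`
    // instead of letting an arbitrary idle worker pick the job up from the
    // shared injector.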

    /// If already in a worker thread of this registry, just execute `op`.
    /// Otherwise, inject `op` into this thread pool. Either way, block until
    /// `op` completes and return its return value. If `op` panics, that panic
    /// will be propagated as well.
    pub(super) fn in_worker<OP, R>(&self, op: OP) -> R
    where
        OP: FnOnce(&WorkerThread, bool) -> R + Send,
        R: Send,
    {
        unsafe {
            let worker_thread = WorkerThread::current();
            if worker_thread.is_null() {
                self.in_worker_cold(op)
            } else if (*worker_thread).registry().id() != self.id() {
                self.in_worker_cross(&*worker_thread, op)
            } else {
                // Perfectly valid to give them a `&T`: this is the current
                // thread, so we know the data structure won't be invalidated
                // until we return.
                op(&*worker_thread, false)
            }
        }
    }

    #[cold]
    unsafe fn in_worker_cold<OP, R>(&self, op: OP) -> R
    where
        OP: FnOnce(&WorkerThread, bool) -> R + Send,
        R: Send,
    {
        thread_local!(static LOCK_LATCH: LockLatch = LockLatch::new());

        LOCK_LATCH.with(|l| {
            // This thread isn't a member of *any* thread pool, so just block.
            debug_assert!(WorkerThread::current().is_null());
            let job = StackJob::new(
                Tlv::null(),
                |injected| {
                    let worker_thread = WorkerThread::current();
                    assert!(injected && !worker_thread.is_null());
                    op(unsafe { &*worker_thread }, true)
                },
                LatchRef::new(l),
            );
            self.inject(unsafe { job.as_job_ref() });
            self.release_thread();
            job.latch.wait_and_reset(); // Make sure we can use the same latch again next time.
            self.acquire_thread();

            unsafe { job.into_result() }
        })
    }
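
    // `in_worker_cold` is the path for a thread that belongs to no pool: the
    // closure is wrapped in a `StackJob`, injected into this registry, and the
    // calling thread blocks on a thread-local `LockLatch` (calling
    // `release_thread` before the wait and `acquire_thread` after, so any
    // configured thread-accounting handlers observe the block) until a worker
    // has executed the job.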

    #[cold]
    unsafe fn in_worker_cross<OP, R>(&self, current_thread: &WorkerThread, op: OP) -> R
    where
        OP: FnOnce(&WorkerThread, bool) -> R + Send,
        R: Send,
    {
        debug_assert!(current_thread.registry().id() != self.id());
        let latch = SpinLatch::cross(current_thread);
        let job = StackJob::new(
            Tlv::null(),
            |injected| {
                let worker_thread = WorkerThread::current();
                assert!(injected && !worker_thread.is_null());
                op(unsafe { &*worker_thread }, true)
            },
            latch,
        );
        self.inject(unsafe { job.as_job_ref() });
        unsafe { current_thread.wait_until(&job.latch) };
        unsafe { job.into_result() }
    }
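
    // Unlike the cold path above, the cross-pool path never blocks the OS
    // thread: `SpinLatch::cross` plus `wait_until` lets the calling worker
    // keep executing jobs from its own registry while it waits for this
    // registry to finish `op`.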

    /// Increments the terminate counter. This increment should be
    /// balanced by a call to `terminate`, which will decrement. This is
    /// used when spawning asynchronous work, which needs to prevent the
    /// registry from terminating so long as it is active.
    pub(super) fn increment_terminate_count(&self) {
        let previous = self.terminate_count.fetch_add(1, Ordering::AcqRel);
        debug_assert!(previous != 0, "registry ref count incremented from zero");
        assert!(previous != usize::MAX, "overflow in registry ref count");
    }

    /// Signals that the thread pool which owns this registry has been
    /// dropped. The worker threads will gradually terminate, once any
    /// extant work is completed.
    pub(super) fn terminate(&self) {
        if self.terminate_count.fetch_sub(1, Ordering::AcqRel) == 1 {
            for (i, thread_info) in self.thread_infos.iter().enumerate() {
                unsafe { OnceLatch::set_and_tickle_one(&thread_info.terminate, self, i) };
            }
        }
    }

    /// Notify the worker that the latch they are sleeping on has been "set".
    pub(super) fn notify_worker_latch_is_set(&self, target_worker_index: usize) {
        self.sleep.notify_worker_latch_is_set(target_worker_index);
    }
}

/// Mark a Rayon worker thread as blocked, so the sleep module can account for
/// it (together with the registry's `DeadlockHandler`) when checking whether
/// every worker is blocked.
#[inline]
pub fn mark_blocked() {
    let worker_thread = WorkerThread::current();
    assert!(!worker_thread.is_null());
    unsafe {
        let registry = &(*worker_thread).registry;
        registry.sleep.mark_blocked(&registry.deadlock_handler)
    }
}

/// Mark a previously blocked Rayon worker thread as unblocked.
#[inline]
pub fn mark_unblocked(registry: &Registry) {
    registry.sleep.mark_unblocked()
}

#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub(super) struct RegistryId {
    addr: usize,
}

struct ThreadInfo {
    /// Latch set once the thread has started and is entering the main loop.
    /// Used to wait for worker threads to become primed, primarily of
    /// interest for benchmarking.
    primed: LockLatch,

    /// Latch set once the worker thread has completed. Used to wait until
    /// workers have stopped; only of interest for testing.
    stopped: LockLatch,

    /// Latch used to signal that termination has been requested. It is set by
    /// the `terminate` method on the `Registry`, once the registry's main
    /// terminate counter reaches zero.
    terminate: OnceLatch,

    /// The "stealer" half of the worker's deque.
    stealer: Stealer<JobRef>,
}

impl ThreadInfo {
    fn new(stealer: Stealer<JobRef>) -> ThreadInfo {
        ThreadInfo {
            primed: LockLatch::new(),
            stopped: LockLatch::new(),
            terminate: OnceLatch::new(),
            stealer,
        }
    }
}

pub(super) struct WorkerThread {
    /// The "worker" half of our local deque.
    worker: Worker<JobRef>,

    /// The "stealer" half of this worker's broadcast deque.
    stealer: Stealer<JobRef>,

    /// Local queue used for `spawn_fifo` indirection.
    fifo: JobFifo,

    pub(crate) index: usize,

    /// A weak random number generator.
    rng: XorShift64Star,

    pub(crate) registry: Arc<Registry>,
}

// The `WorkerThread` is allocated on the stack of the worker on entry and
// stored into this thread-local variable, so it remains valid at least until
// the worker fully unwinds. Using a raw pointer avoids the need for a
// `RefCell` or similar.
thread_local! {
    static WORKER_THREAD_STATE: Cell<*const WorkerThread> = const { Cell::new(ptr::null()) };
}

impl From<ThreadBuilder> for WorkerThread {
    fn from(thread: ThreadBuilder) -> Self {
        Self {
            worker: thread.worker,
            stealer: thread.stealer,
            fifo: JobFifo::new(),
            index: thread.index,
            rng: XorShift64Star::new(),
            registry: thread.registry,
        }
    }
}

impl Drop for WorkerThread {
    fn drop(&mut self) {
        // Undo `set_current`.
        WORKER_THREAD_STATE.with(|t| {
            assert!(t.get().eq(&(self as *const _)));
            t.set(ptr::null());
        });
    }
}

impl WorkerThread {
    /// Gets the `WorkerThread` for the current thread; returns NULL if this is
    /// not a worker thread. The pointer is valid anywhere on the current thread.
    #[inline]
    pub(super) fn current() -> *const WorkerThread {
        WORKER_THREAD_STATE.with(Cell::get)
    }

    /// Sets `thread` as the worker thread for the current thread.
    /// This is done during worker thread startup.
    unsafe fn set_current(thread: *const WorkerThread) {
        WORKER_THREAD_STATE.with(|t| {
            assert!(t.get().is_null());
            t.set(thread);
        });
    }

    /// Returns the registry that owns this worker thread.
    #[inline]
    pub(super) fn registry(&self) -> &Arc<Registry> {
        &self.registry
    }

    /// Our index amongst the worker threads (ranges from `0..num_threads()`).
    #[inline]
    pub(super) fn index(&self) -> usize {
        self.index
    }

    #[inline]
    pub(super) unsafe fn push(&self, job: JobRef) {
        let queue_was_empty = self.worker.is_empty();
        self.worker.push(job);
        self.registry.sleep.new_internal_jobs(1, queue_was_empty);
    }

    #[inline]
    pub(super) unsafe fn push_fifo(&self, job: JobRef) {
        unsafe { self.push(self.fifo.push(job)) };
    }

    #[inline]
    pub(super) fn local_deque_is_empty(&self) -> bool {
        self.worker.is_empty()
    }

    /// Attempts to obtain a "local" job -- typically this means popping from
    /// the top of the stack, though if we are configured for breadth-first
    /// execution, it would mean dequeuing from the bottom.
    #[inline]
    pub(super) fn take_local_job(&self) -> Option<JobRef> {
        let popped_job = self.worker.pop();

        if popped_job.is_some() {
            return popped_job;
        }

        loop {
            match self.stealer.steal() {
                Steal::Success(job) => return Some(job),
                Steal::Empty => return None,
                Steal::Retry => {}
            }
        }
    }
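
    // When the local deque is empty, `take_local_job` falls through to this
    // worker's broadcast stealer, so broadcast jobs only run once a thread has
    // exhausted its own queue.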

    pub(super) fn has_injected_job(&self) -> bool {
        !self.stealer.is_empty() || self.registry.has_injected_job()
    }

    /// Wait until the latch is set. Try to keep busy by popping and stealing
    /// jobs as necessary.
    #[inline]
    pub(super) unsafe fn wait_until<L: AsCoreLatch + ?Sized>(&self, latch: &L) {
        let latch = latch.as_core_latch();
        if !latch.probe() {
            unsafe { self.wait_until_cold(latch) };
        }
    }

    #[cold]
    unsafe fn wait_until_cold(&self, latch: &CoreLatch) {
        // The code below should swallow all panics and hence never unwind; but
        // if something does go wrong, we want to abort, because otherwise
        // other code in rayon may assume that the latch has been signaled, and
        // that can lead to random memory accesses, which would be *very bad*.
        let abort_guard = unwind::AbortIfPanic;

        'outer: while !latch.probe() {
            if let Some(job) = self.take_local_job() {
                unsafe { self.execute(job) };
                continue;
            }

            let mut idle_state = self.registry.sleep.start_looking(self.index);
            while !latch.probe() {
                if let Some(job) = self.find_work() {
                    self.registry.sleep.work_found();
                    unsafe { self.execute(job) };
                    continue 'outer;
                } else {
                    self.registry.sleep.no_work_found(&mut idle_state, latch, &self)
                }
            }

            // If we were sleepy, we are not anymore. We "found work" --
            // whatever the surrounding thread was doing before it had to wait.
            self.registry.sleep.work_found();
            break;
        }

        mem::forget(abort_guard); // successful execution, do not abort
    }
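
    // The shape of `wait_until_cold` mirrors the sleep protocol: run local
    // jobs first, then enter an idle state in which every failed search for
    // work is reported via `no_work_found` (which may eventually put the
    // thread to sleep), and leave that state through `work_found` as soon as
    // a job turns up or the awaited latch is set.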

    unsafe fn wait_until_out_of_work(&self) {
        debug_assert_eq!(self as *const _, WorkerThread::current());
        let registry = &*self.registry;
        let index = self.index;

        registry.acquire_thread();
        unsafe { self.wait_until(&registry.thread_infos[index].terminate) };

        debug_assert!(self.take_local_job().is_none());

        unsafe { Latch::set(&registry.thread_infos[index].stopped) };
    }

    fn find_work(&self) -> Option<JobRef> {
        // Try to find some work to do. We give preference first to things in
        // our local deque, then to other workers' deques, and finally to
        // injected jobs from the outside. The idea is to finish what we
        // started before we take on something new.
        self.take_local_job().or_else(|| self.steal()).or_else(|| self.registry.pop_injected_job())
    }

    pub(super) fn yield_now(&self) -> Yield {
        match self.find_work() {
            Some(job) => unsafe {
                self.execute(job);
                Yield::Executed
            },
            None => Yield::Idle,
        }
    }

    pub(super) fn yield_local(&self) -> Yield {
        match self.take_local_job() {
            Some(job) => unsafe {
                self.execute(job);
                Yield::Executed
            },
            None => Yield::Idle,
        }
    }

    #[inline]
    pub(super) unsafe fn execute(&self, job: JobRef) {
        unsafe { job.execute() };
    }

    /// Try to steal a single job and return it.
    ///
    /// This should only be done as a last resort, when there is no local work
    /// to do.
    fn steal(&self) -> Option<JobRef> {
        // We only steal when we don't have any work to do locally.
        debug_assert!(self.local_deque_is_empty());

        let thread_infos = &self.registry.thread_infos.as_slice();
        let num_threads = thread_infos.len();
        if num_threads <= 1 {
            return None;
        }

        loop {
            let mut retry = false;
            let start = self.rng.next_usize(num_threads);
            let job = (start..num_threads)
                .chain(0..start)
                .filter(move |&i| i != self.index)
                .find_map(|victim_index| {
                    let victim = &thread_infos[victim_index];
                    match victim.stealer.steal() {
                        Steal::Success(job) => Some(job),
                        Steal::Empty => None,
                        Steal::Retry => {
                            retry = true;
                            None
                        }
                    }
                });
            if job.is_some() || !retry {
                return job;
            }
        }
    }
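
    // Victim selection starts at a random index and walks the ring of other
    // workers once; the outer `loop` only repeats while some victim reported
    // `Steal::Retry`, so a pool with no stealable work returns `None` quickly.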
}

unsafe fn main_loop(thread: ThreadBuilder) {
    let worker_thread = &WorkerThread::from(thread);
    unsafe { WorkerThread::set_current(worker_thread) };
    let registry = &*worker_thread.registry;
    let index = worker_thread.index;

    // Let the registry know we are ready to do work.
    unsafe { Latch::set(&registry.thread_infos[index].primed) };

    // Worker threads should not panic. If they do, just abort, as the internal
    // state of the thread pool is corrupted. Note that if the panic handler
    // itself panics, we abort here anyway.
    let abort_guard = unwind::AbortIfPanic;

    // Inform a user callback that we started a thread.
    if let Some(ref handler) = registry.start_handler {
        registry.catch_unwind(|| handler(index));
    }

    unsafe { worker_thread.wait_until_out_of_work() };

    // Normal termination, do not abort.
    mem::forget(abort_guard);

    // Inform a user callback that we exited a thread.
    if let Some(ref handler) = registry.exit_handler {
        registry.catch_unwind(|| handler(index));
    }

    registry.release_thread();
}

/// If already in a worker thread, just execute `op`. Otherwise, execute `op`
/// in the default thread pool. Either way, block until `op` completes and
/// return its return value. If `op` panics, that panic will be propagated as
/// well.
pub(super) fn in_worker<OP, R>(op: OP) -> R
where
    OP: FnOnce(&WorkerThread, bool) -> R + Send,
    R: Send,
{
    unsafe {
        let owner_thread = WorkerThread::current();
        if !owner_thread.is_null() {
            // Perfectly valid to give them a `&T`: this is the current thread,
            // so we know the data structure won't be invalidated until we return.
            op(&*owner_thread, false)
        } else {
            global_registry().in_worker(op)
        }
    }
}

/// [xorshift*] is a fast pseudorandom number generator which will even
/// tolerate weak seeding, as long as it's not zero.
///
/// [xorshift*]: https://en.wikipedia.org/wiki/Xorshift#xorshift*
struct XorShift64Star {
    state: Cell<u64>,
}

impl XorShift64Star {
    fn new() -> Self {
        // Any non-zero seed will do -- this uses the hash of a global counter.
        let mut seed = 0;
        while seed == 0 {
            let mut hasher = DefaultHasher::new();
            static COUNTER: AtomicUsize = AtomicUsize::new(0);
            hasher.write_usize(COUNTER.fetch_add(1, Ordering::Relaxed));
            seed = hasher.finish();
        }

        XorShift64Star { state: Cell::new(seed) }
    }

    fn next(&self) -> u64 {
        let mut x = self.state.get();
        debug_assert_ne!(x, 0);
        x ^= x >> 12;
        x ^= x << 25;
        x ^= x >> 27;
        self.state.set(x);
        x.wrapping_mul(0x2545_f491_4f6c_dd1d)
    }

    /// Return a value from `0..n`.
    fn next_usize(&self, n: usize) -> usize {
        (self.next() % n as u64) as usize
    }
}
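
// The RNG above is only used to pick a random starting victim in `steal`, so
// the slight modulo bias in `next_usize` and the hash-based seeding are
// acceptable; nothing here requires cryptographic quality.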