// mmtk/util/rust_util/mod.rs
pub mod atomic_box;
pub mod rev_group;
pub mod zeroed_alloc;

/// Return the smaller of two `usize` values.
///
/// Hand-rolled because `std::cmp::min` is not a `const fn`, and this
/// helper is needed in constant contexts.
pub const fn min_of_usize(a: usize, b: usize) -> usize {
    if a < b {
        a
    } else {
        b
    }
}
17
// On nightly toolchains, use the compiler intrinsics directly.
#[cfg(feature = "nightly")]
pub use core::intrinsics::{likely, unlikely};
20
/// Stable-toolchain fallback for the `likely`/`unlikely` branch-prediction
/// hints (the "nightly" feature re-exports the real intrinsics instead).
///
/// The trick: `cold()` is an empty function marked `#[cold]`, so any branch
/// that calls it is treated by the optimizer as the unlikely path.
#[cfg(not(feature = "nightly"))]
#[inline]
#[cold]
fn cold() {}

/// Hint that `b` is expected to be `true`. Returns `b` unchanged.
#[cfg(not(feature = "nightly"))]
#[inline]
pub fn likely(b: bool) -> bool {
    match b {
        true => true,
        false => {
            // The `false` arm is marked cold, hinting it is the rare case.
            cold();
            false
        }
    }
}

/// Hint that `b` is expected to be `false`. Returns `b` unchanged.
#[cfg(not(feature = "nightly"))]
#[inline]
pub fn unlikely(b: bool) -> bool {
    match b {
        false => false,
        true => {
            // The `true` arm is marked cold, hinting it is the rare case.
            cold();
            true
        }
    }
}
45
use std::cell::UnsafeCell;
use std::mem::MaybeUninit;
use std::sync::Once;
49
/// A value that is lazily initialized exactly once, then treated as
/// read-only.  Built from an `UnsafeCell<MaybeUninit<T>>` guarded by a
/// `std::sync::Once`, so it can live in a `static` (see `new`, which is
/// `const`).
pub struct InitializeOnce<T: 'static> {
    // The storage; only valid to read after `once` has completed.
    v: UnsafeCell<MaybeUninit<T>>,
    // Records whether (and coordinates how) `v` gets initialized.
    once: Once,
}
59
impl<T> InitializeOnce<T> {
    /// Create an uninitialized `InitializeOnce`. `const` so it can be used
    /// to declare `static` items.
    pub const fn new() -> Self {
        InitializeOnce {
            v: UnsafeCell::new(MaybeUninit::uninit()),
            once: Once::new(),
        }
    }

    /// Run `init_fn` and store its result — exactly once, no matter how many
    /// threads race here.  Losing callers block until the winning call
    /// completes, so the value is initialized when this returns.
    pub fn initialize_once(&self, init_fn: &'static dyn Fn() -> T) {
        self.once.call_once(|| {
            // SAFETY: `Once::call_once` runs this closure at most once and
            // exclusively, so no other reference into `v` can exist while
            // we write the value.
            unsafe { &mut *self.v.get() }.write(init_fn());
        });
        debug_assert!(self.once.is_completed());
    }

    /// Get a shared reference to the stored value.
    ///
    /// Must only be called after `initialize_once` has completed: the check
    /// below is a `debug_assert!`, so in release builds an early call reads
    /// uninitialized memory (undefined behavior).
    pub fn get_ref(&self) -> &T {
        debug_assert!(self.once.is_completed());
        // SAFETY: relies on `initialize_once` having completed, so the value
        // is initialized and only shared references to it are handed out.
        unsafe { (*self.v.get()).assume_init_ref() }
    }

    /// Get an exclusive reference to the stored value from a shared borrow.
    ///
    /// # Safety
    /// The caller must ensure the value has been initialized and that no
    /// other reference (shared or exclusive) to it is alive for the lifetime
    /// of the returned `&mut T`.
    #[allow(clippy::mut_from_ref)]
    pub unsafe fn get_mut(&self) -> &mut T {
        debug_assert!(self.once.is_completed());
        // SAFETY: the caller upholds the initialization and aliasing
        // requirements documented above.
        unsafe { (*self.v.get()).assume_init_mut() }
    }
}
99
// Deref straight to the stored value, so e.g. a `static X: InitializeOnce<T>`
// reads like a plain `T`.  Delegates to `get_ref`, so the same caveat
// applies: dereferencing before `initialize_once` is UB in release builds.
impl<T> std::ops::Deref for InitializeOnce<T> {
    type Target = T;
    fn deref(&self) -> &Self::Target {
        self.get_ref()
    }
}
106
107unsafe impl<T> Sync for InitializeOnce<T> {}
108
109pub fn debug_process_thread_id() -> String {
111 use crate::util::os::*;
112 format!(
113 "PID: {}, TID: {}",
114 OS::get_process_id().map_or("(Failed to get PID)".to_string(), |pid| format!("{}", pid)),
115 OS::get_thread_id().map_or("(Failed to get TID)".to_string(), |tid| format!("{}", tid)),
116 )
117}
118
#[cfg(test)]
mod initialize_once_tests {
    use super::*;

    /// Many threads race to initialize one static `InitializeOnce`; the init
    /// function must run exactly once and every thread must see the value.
    #[test]
    fn test_threads_compete_initialize() {
        use std::sync::atomic::AtomicUsize;
        use std::sync::atomic::Ordering;
        use std::thread;

        const N_THREADS: usize = 1000;
        static I: InitializeOnce<usize> = InitializeOnce::new();
        // Counts how many times the initialization function actually ran.
        static INITIALIZE_COUNT: AtomicUsize = AtomicUsize::new(0);
        fn initialize_usize() -> usize {
            INITIALIZE_COUNT.fetch_add(1, Ordering::SeqCst);
            42
        }

        let mut threads = vec![];
        // Fixed off-by-one: the range was `1..N_THREADS`, which spawned only
        // N_THREADS - 1 threads.
        for _ in 0..N_THREADS {
            threads.push(thread::spawn(|| {
                I.initialize_once(&initialize_usize);
                // Every thread, winner or loser, must observe the value.
                assert_eq!(*I, 42);
            }));
        }
        threads.into_iter().for_each(|t| t.join().unwrap());
        // The initialization function ran exactly once despite the race.
        assert_eq!(INITIALIZE_COUNT.load(Ordering::SeqCst), 1);
    }
}