use atomic_refcell::AtomicRefCell;
use std::collections::HashMap;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::Mutex;
use std::time::Instant;

/// Global state for an MMTk instance, shared across its components.
pub struct GlobalState {
    /// Whether MMTk has completed initialization and is ready for collection.
    pub(crate) initialized: AtomicBool,
    /// The current GC status (see [`GcStatus`]).
    pub(crate) gc_status: Mutex<GcStatus>,
    /// The time at which the current GC started, if a GC is in progress.
    pub(crate) gc_start_time: AtomicRefCell<Option<Instant>>,
    /// Whether the current GC is an emergency collection.
    pub(crate) emergency_collection: AtomicBool,
    /// Whether the current GC was explicitly requested by the user/binding.
    pub(crate) user_triggered_collection: AtomicBool,
    /// Whether the current GC was triggered internally by MMTk.
    pub(crate) internal_triggered_collection: AtomicBool,
    /// Whether the last GC was triggered internally by MMTk.
    pub(crate) last_internal_triggered_collection: AtomicBool,
    /// Whether an allocation has succeeded since the last collection attempt;
    /// consumed by `determine_collection_attempts` to reset the attempt counter.
    pub(crate) allocation_success: AtomicBool,
    /// The number of consecutive collection attempts; grows while allocations keep failing.
    pub(crate) max_collection_attempts: AtomicUsize,
    /// The number of collection attempts for the current GC.
    pub(crate) cur_collection_attempts: AtomicUsize,
    /// The number of mutator stacks scanned so far in the current GC.
    pub(crate) scanned_stacks: AtomicUsize,
    /// Whether all mutator stacks have been scanned in the current GC.
    pub(crate) stacks_prepared: AtomicBool,
    /// Bytes allocated since the last stress-test GC.
    pub(crate) allocation_bytes: AtomicUsize,
    /// Whether we are currently inside the benchmarking harness.
    pub(crate) inside_harness: AtomicBool,
    /// Bytes allocated through MMTk's counted malloc.
    #[cfg(feature = "malloc_counted_size")]
    pub(crate) malloc_bytes: AtomicUsize,
    /// Per-space live-bytes statistics collected during the last GC, keyed by space name.
    pub(crate) live_bytes_in_last_gc: AtomicRefCell<HashMap<&'static str, LiveBytesStats>>,
    /// The number of used pages recorded at the end of the last GC.
    pub(crate) used_pages_after_last_gc: AtomicUsize,
}

impl GlobalState {
    /// Return true if MMTk has completed initialization.
    pub fn is_initialized(&self) -> bool {
        self.initialized.load(Ordering::SeqCst)
    }

    /// Decide the kind of the upcoming collection and record the number of
    /// collection attempts. Returns true if this will be an emergency collection.
    pub fn set_collection_kind(
        &self,
        last_collection_was_exhaustive: bool,
        heap_can_grow: bool,
    ) -> bool {
        self.cur_collection_attempts.store(
            if self.user_triggered_collection.load(Ordering::Relaxed) {
                1
            } else {
                self.determine_collection_attempts()
            },
            Ordering::Relaxed,
        );

        let emergency_collection = !self.is_internal_triggered_collection()
            && last_collection_was_exhaustive
            && self.cur_collection_attempts.load(Ordering::Relaxed) > 1
            && !heap_can_grow;
        self.emergency_collection
            .store(emergency_collection, Ordering::Relaxed);

        emergency_collection
    }

    /// Work out how many collection attempts to charge the current request with:
    /// keep incrementing the counter while allocations fail, and reset it to 1 once
    /// an allocation has succeeded.
    fn determine_collection_attempts(&self) -> usize {
        if !self.allocation_success.load(Ordering::Relaxed) {
            self.max_collection_attempts.fetch_add(1, Ordering::Relaxed);
        } else {
            self.allocation_success.store(false, Ordering::Relaxed);
            self.max_collection_attempts.store(1, Ordering::Relaxed);
        }

        self.max_collection_attempts.load(Ordering::Relaxed)
    }

    fn is_internal_triggered_collection(&self) -> bool {
        let is_internal_triggered = self
            .last_internal_triggered_collection
            .load(Ordering::SeqCst);
        // Internally triggered GCs are only expected for concurrent plans, which are
        // not implemented, so this must never be true.
        assert!(
            !is_internal_triggered,
            "We have no concurrent GC implemented. We should not have internally triggered GC"
        );
        is_internal_triggered
    }

    /// Return true if the current GC is an emergency collection.
    pub fn is_emergency_collection(&self) -> bool {
        self.emergency_collection.load(Ordering::Relaxed)
    }

    /// Return true if the current GC was explicitly requested by the user/binding.
    pub fn is_user_triggered_collection(&self) -> bool {
        self.user_triggered_collection.load(Ordering::Relaxed)
    }

    /// Clear the collection-trigger flags at the end of a GC, remembering whether the
    /// GC that just finished was internally triggered.
    pub fn reset_collection_trigger(&self) {
        self.last_internal_triggered_collection.store(
            self.internal_triggered_collection.load(Ordering::SeqCst),
            Ordering::Relaxed,
        );
        self.internal_triggered_collection
            .store(false, Ordering::SeqCst);
        self.user_triggered_collection
            .store(false, Ordering::Relaxed);
    }

    /// Return true if all mutator stacks have been scanned in the current GC.
    pub fn stacks_prepared(&self) -> bool {
        self.stacks_prepared.load(Ordering::SeqCst)
    }

    /// Reset the stack-scanning state at the start of a GC.
    pub fn prepare_for_stack_scanning(&self) {
        self.scanned_stacks.store(0, Ordering::SeqCst);
        self.stacks_prepared.store(false, Ordering::SeqCst);
    }

    /// Record that one more mutator stack has been scanned. Returns true if this call
    /// scanned the last of the `n_mutators` stacks, i.e. stack scanning is now complete.
    pub fn inform_stack_scanned(&self, n_mutators: usize) -> bool {
        let old = self.scanned_stacks.fetch_add(1, Ordering::SeqCst);
        debug_assert!(
            old < n_mutators,
            "The number of scanned stacks ({}) is more than the number of mutators ({})",
            old,
            n_mutators
        );
        let scanning_done = old + 1 == n_mutators;
        if scanning_done {
            self.stacks_prepared.store(true, Ordering::SeqCst);
        }
        scanning_done
    }

    /// Add `size` to the stress-test allocation counter and return the new total.
    pub fn increase_allocation_bytes_by(&self, size: usize) -> usize {
        let old_allocation_bytes = self.allocation_bytes.fetch_add(size, Ordering::SeqCst);
        trace!(
            "Stress GC: old_allocation_bytes = {}, size = {}, allocation_bytes = {}",
            old_allocation_bytes,
            size,
            self.allocation_bytes.load(Ordering::Relaxed),
        );
        old_allocation_bytes + size
    }

    /// Return the number of bytes allocated through counted malloc, in pages (rounded up).
    #[cfg(feature = "malloc_counted_size")]
    pub fn get_malloc_bytes_in_pages(&self) -> usize {
        crate::util::conversions::bytes_to_pages_up(self.malloc_bytes.load(Ordering::Relaxed))
    }

    #[cfg(feature = "malloc_counted_size")]
    pub(crate) fn increase_malloc_bytes_by(&self, size: usize) {
        self.malloc_bytes.fetch_add(size, Ordering::SeqCst);
    }

    #[cfg(feature = "malloc_counted_size")]
    pub(crate) fn decrease_malloc_bytes_by(&self, size: usize) {
        self.malloc_bytes.fetch_sub(size, Ordering::SeqCst);
    }

    pub(crate) fn set_used_pages_after_last_gc(&self, pages: usize) {
        self.used_pages_after_last_gc
            .store(pages, Ordering::Relaxed);
    }

    pub(crate) fn get_used_pages_after_last_gc(&self) -> usize {
        self.used_pages_after_last_gc.load(Ordering::Relaxed)
    }
}

impl Default for GlobalState {
    fn default() -> Self {
        Self {
            initialized: AtomicBool::new(false),
            gc_status: Mutex::new(GcStatus::NotInGC),
            gc_start_time: AtomicRefCell::new(None),
            stacks_prepared: AtomicBool::new(false),
            emergency_collection: AtomicBool::new(false),
            user_triggered_collection: AtomicBool::new(false),
            internal_triggered_collection: AtomicBool::new(false),
            last_internal_triggered_collection: AtomicBool::new(false),
            allocation_success: AtomicBool::new(false),
            max_collection_attempts: AtomicUsize::new(0),
            cur_collection_attempts: AtomicUsize::new(0),
            scanned_stacks: AtomicUsize::new(0),
            allocation_bytes: AtomicUsize::new(0),
            inside_harness: AtomicBool::new(false),
            #[cfg(feature = "malloc_counted_size")]
            malloc_bytes: AtomicUsize::new(0),
            live_bytes_in_last_gc: AtomicRefCell::new(HashMap::new()),
            used_pages_after_last_gc: AtomicUsize::new(0),
        }
    }
}

/// The GC status of an MMTk instance.
#[derive(PartialEq)]
pub enum GcStatus {
    /// No GC is happening.
    NotInGC,
    /// The GC is in its preparation stage.
    GcPrepare,
    /// The GC proper is underway.
    GcProper,
}

/// Live-bytes statistics for one space, collected during the last GC.
#[derive(Copy, Clone, Debug)]
pub struct LiveBytesStats {
    /// The number of live bytes in the space.
    pub live_bytes: usize,
    /// The number of pages used by the space.
    pub used_pages: usize,
    /// The number of bytes used by the space.
    pub used_bytes: usize,
}
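
// Illustrative sketch: a minimal test module, assuming a default `GlobalState`,
// that exercises the stack-scanning bookkeeping and the emergency-collection
// decision shown above.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn stack_scanning_completes_after_all_mutators_report() {
        let state = GlobalState::default();
        state.prepare_for_stack_scanning();
        assert!(!state.stacks_prepared());
        // With two mutators, only the second report completes scanning.
        assert!(!state.inform_stack_scanned(2));
        assert!(state.inform_stack_scanned(2));
        assert!(state.stacks_prepared());
    }

    #[test]
    fn repeated_failed_attempts_escalate_to_emergency_collection() {
        let state = GlobalState::default();
        // First attempt: the attempt counter is 1, so this is not an emergency GC
        // even though the heap cannot grow.
        assert!(!state.set_collection_kind(true, false));
        // Second attempt without a successful allocation in between: the counter
        // reaches 2, the last GC was exhaustive and the heap cannot grow, so this
        // becomes an emergency collection.
        assert!(state.set_collection_kind(true, false));
        assert!(state.is_emergency_collection());
    }
}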