mmtk/global_state.rs

use atomic_refcell::AtomicRefCell;
use std::collections::HashMap;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::Mutex;
use std::time::Instant;

/// This stores some global states for an MMTk instance.
/// Some MMTk components like plans and allocators may keep a reference to the struct, and can access it.
// This used to be a part of the `BasePlan`. In that case, any component that accesses
// the states needs a reference to the plan. It makes it harder for us to reason about the access pattern
// for the plan, as many components hold references to the plan. Besides, the states
// are actually not related to a plan; they are just global states for MMTk. So we refactored
// those fields into this separate struct. For components that access the state, they just need
// a reference to the struct, and are no longer dependent on the plan.
// We may consider further breaking down the fields into smaller structs.
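///
/// # Example
///
/// A minimal sketch (the component type is hypothetical, not part of this module) of how
/// a component might hold a shared reference to the global state, e.g. behind an `Arc`:
///
/// ```ignore
/// use std::sync::Arc;
///
/// struct MyComponent {
///     state: Arc<GlobalState>,
/// }
///
/// impl MyComponent {
///     fn ready_for_gc(&self) -> bool {
///         // True once `initialize_collection()` has been called on the MMTk instance.
///         self.state.is_initialized()
///     }
/// }
/// ```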
pub struct GlobalState {
    /// Whether MMTk is now ready for collection. This is set to true when initialize_collection() is called.
    pub(crate) initialized: AtomicBool,
    /// The current GC status.
    pub(crate) gc_status: Mutex<GcStatus>,
    /// When did the last GC start? Only accessed by the last parked worker.
    pub(crate) gc_start_time: AtomicRefCell<Option<Instant>>,
    /// Is the current GC an emergency collection? Emergency means we may run out of memory soon, and we should
    /// attempt to collect as much as we can.
    pub(crate) emergency_collection: AtomicBool,
    /// Is the current GC triggered by the user?
    pub(crate) user_triggered_collection: AtomicBool,
    /// Is the current GC triggered internally by MMTk? This is unused for now. We may have internally triggered GC
    /// for a concurrent plan.
    pub(crate) internal_triggered_collection: AtomicBool,
    /// Was the last GC internally triggered?
    pub(crate) last_internal_triggered_collection: AtomicBool,
    // Has an allocation succeeded since the emergency collection?
    pub(crate) allocation_success: AtomicBool,
    // Maximum number of failed attempts by a single thread
    pub(crate) max_collection_attempts: AtomicUsize,
    // Current collection attempt
    pub(crate) cur_collection_attempts: AtomicUsize,
    /// A counter for per-mutator stack scanning
    pub(crate) scanned_stacks: AtomicUsize,
    /// Have we scanned all the stacks?
    pub(crate) stacks_prepared: AtomicBool,
    /// A counter that keeps track of the number of bytes allocated since the last stress test
    pub(crate) allocation_bytes: AtomicUsize,
    /// Are we inside the benchmark harness?
    pub(crate) inside_harness: AtomicBool,
    /// A counter that keeps track of the number of bytes allocated by malloc
    #[cfg(feature = "malloc_counted_size")]
    pub(crate) malloc_bytes: AtomicUsize,
    /// This stores the live bytes and the used bytes (by pages) for each space in the last GC. This counter is only updated in the GC release phase.
    pub(crate) live_bytes_in_last_gc: AtomicRefCell<HashMap<&'static str, LiveBytesStats>>,
    /// The number of used pages at the end of the last GC. This can be used to estimate how many pages we have allocated since the last GC.
    pub(crate) used_pages_after_last_gc: AtomicUsize,
}

impl GlobalState {
    /// Is MMTk initialized?
    pub fn is_initialized(&self) -> bool {
        self.initialized.load(Ordering::SeqCst)
    }

    /// Set the collection kind for the current GC. This is called before
    /// scheduling collection to determine what kind of collection it will be.
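    ///
    /// # Example
    ///
    /// A sketch of how a caller might use the result when a GC is about to be scheduled
    /// (`state` is a `GlobalState` reference; `last_was_exhaustive` and `can_grow` are
    /// illustrative inputs from the plan, not defined here):
    ///
    /// ```ignore
    /// let emergency = state.set_collection_kind(last_was_exhaustive, can_grow);
    /// if emergency {
    ///     // A plan may choose a more aggressive collection in this case, e.g. a full-heap GC.
    /// }
    /// ```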
    pub fn set_collection_kind(
        &self,
        last_collection_was_exhaustive: bool,
        heap_can_grow: bool,
    ) -> bool {
        self.cur_collection_attempts.store(
            if self.user_triggered_collection.load(Ordering::Relaxed) {
                1
            } else {
                self.determine_collection_attempts()
            },
            Ordering::Relaxed,
        );

        let emergency_collection = !self.is_internal_triggered_collection()
            && last_collection_was_exhaustive
            && self.cur_collection_attempts.load(Ordering::Relaxed) > 1
            && !heap_can_grow;
        self.emergency_collection
            .store(emergency_collection, Ordering::Relaxed);

        emergency_collection
    }

    fn determine_collection_attempts(&self) -> usize {
        if !self.allocation_success.load(Ordering::Relaxed) {
            self.max_collection_attempts.fetch_add(1, Ordering::Relaxed);
        } else {
            self.allocation_success.store(false, Ordering::Relaxed);
            self.max_collection_attempts.store(1, Ordering::Relaxed);
        }

        self.max_collection_attempts.load(Ordering::Relaxed)
    }

    fn is_internal_triggered_collection(&self) -> bool {
        let is_internal_triggered = self
            .last_internal_triggered_collection
            .load(Ordering::SeqCst);
        // Remove this assertion when we have concurrent GC.
        assert!(
            !is_internal_triggered,
            "We have no concurrent GC implemented. We should not have internally triggered GC"
        );
        is_internal_triggered
    }

    /// Return true if the current GC is an emergency collection.
    pub fn is_emergency_collection(&self) -> bool {
        self.emergency_collection.load(Ordering::Relaxed)
    }

    /// Return true if this collection was triggered by application code.
    pub fn is_user_triggered_collection(&self) -> bool {
        self.user_triggered_collection.load(Ordering::Relaxed)
    }

    /// Reset collection state information.
    pub fn reset_collection_trigger(&self) {
        self.last_internal_triggered_collection.store(
            self.internal_triggered_collection.load(Ordering::SeqCst),
            Ordering::Relaxed,
        );
        self.internal_triggered_collection
            .store(false, Ordering::SeqCst);
        self.user_triggered_collection
            .store(false, Ordering::Relaxed);
    }

    /// Are the stacks scanned?
    pub fn stacks_prepared(&self) -> bool {
        self.stacks_prepared.load(Ordering::SeqCst)
    }

    /// Prepare for stack scanning. This is usually used with `inform_stack_scanned()`.
    /// This should be called before doing stack scanning.
    pub fn prepare_for_stack_scanning(&self) {
        self.scanned_stacks.store(0, Ordering::SeqCst);
        self.stacks_prepared.store(false, Ordering::SeqCst);
    }

    /// Inform that 1 stack has been scanned. The argument `n_mutators` indicates the
    /// total number of stacks we should scan. This method returns true if the number of scanned
    /// stacks equals the total mutator count. Otherwise it returns false. This method
    /// is thread-safe and we guarantee only one thread will return true.
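    ///
    /// # Example
    ///
    /// A minimal sketch of the intended protocol (the `mutators()` iteration is hypothetical,
    /// not an API of this module; each scan may in practice happen on a different thread):
    ///
    /// ```ignore
    /// state.prepare_for_stack_scanning();
    /// for mutator in mutators() {
    ///     // Exactly one caller observes `true`, when the count reaches `n_mutators`.
    ///     if state.inform_stack_scanned(n_mutators) {
    ///         // All stacks have been scanned; `stacks_prepared()` is now true.
    ///     }
    /// }
    /// ```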
    pub fn inform_stack_scanned(&self, n_mutators: usize) -> bool {
        let old = self.scanned_stacks.fetch_add(1, Ordering::SeqCst);
        debug_assert!(
            old < n_mutators,
            "The number of scanned stacks ({}) is more than the number of mutators ({})",
            old,
            n_mutators
        );
        let scanning_done = old + 1 == n_mutators;
        if scanning_done {
            self.stacks_prepared.store(true, Ordering::SeqCst);
        }
        scanning_done
    }

    /// Increase the allocation bytes and return the current allocation bytes after the increase.
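    ///
    /// # Example
    ///
    /// A sketch of how a stress-test check might use the returned total
    /// (`stress_threshold` is illustrative, not part of this module):
    ///
    /// ```ignore
    /// let total = state.increase_allocation_bytes_by(size);
    /// if total > stress_threshold {
    ///     // Trigger a stress GC; the counter is reset elsewhere.
    /// }
    /// ```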
    pub fn increase_allocation_bytes_by(&self, size: usize) -> usize {
        let old_allocation_bytes = self.allocation_bytes.fetch_add(size, Ordering::SeqCst);
        trace!(
            "Stress GC: old_allocation_bytes = {}, size = {}, allocation_bytes = {}",
            old_allocation_bytes,
            size,
            self.allocation_bytes.load(Ordering::Relaxed),
        );
        old_allocation_bytes + size
    }

    /// Get the number of pages used by the counted malloc memory, rounded up from the
    /// number of bytes recorded in `malloc_bytes`.
    #[cfg(feature = "malloc_counted_size")]
    pub fn get_malloc_bytes_in_pages(&self) -> usize {
        crate::util::conversions::bytes_to_pages_up(self.malloc_bytes.load(Ordering::Relaxed))
    }

    /// Increase the counted malloc bytes by `size`.
    #[cfg(feature = "malloc_counted_size")]
    pub(crate) fn increase_malloc_bytes_by(&self, size: usize) {
        self.malloc_bytes.fetch_add(size, Ordering::SeqCst);
    }

    /// Decrease the counted malloc bytes by `size`.
    #[cfg(feature = "malloc_counted_size")]
    pub(crate) fn decrease_malloc_bytes_by(&self, size: usize) {
        self.malloc_bytes.fetch_sub(size, Ordering::SeqCst);
    }

    /// Set the number of used pages at the end of the last GC.
    pub(crate) fn set_used_pages_after_last_gc(&self, pages: usize) {
        self.used_pages_after_last_gc
            .store(pages, Ordering::Relaxed);
    }

    /// Get the number of used pages at the end of the last GC.
    pub(crate) fn get_used_pages_after_last_gc(&self) -> usize {
        self.used_pages_after_last_gc.load(Ordering::Relaxed)
    }
}

impl Default for GlobalState {
    fn default() -> Self {
        Self {
            initialized: AtomicBool::new(false),
            gc_status: Mutex::new(GcStatus::NotInGC),
            gc_start_time: AtomicRefCell::new(None),
            stacks_prepared: AtomicBool::new(false),
            emergency_collection: AtomicBool::new(false),
            user_triggered_collection: AtomicBool::new(false),
            internal_triggered_collection: AtomicBool::new(false),
            last_internal_triggered_collection: AtomicBool::new(false),
            allocation_success: AtomicBool::new(false),
            max_collection_attempts: AtomicUsize::new(0),
            cur_collection_attempts: AtomicUsize::new(0),
            scanned_stacks: AtomicUsize::new(0),
            allocation_bytes: AtomicUsize::new(0),
            inside_harness: AtomicBool::new(false),
            #[cfg(feature = "malloc_counted_size")]
            malloc_bytes: AtomicUsize::new(0),
            live_bytes_in_last_gc: AtomicRefCell::new(HashMap::new()),
            used_pages_after_last_gc: AtomicUsize::new(0),
        }
    }
}

#[derive(PartialEq)]
pub enum GcStatus {
    NotInGC,
    GcPrepare,
    GcProper,
}

/// Statistics for the live bytes in the last GC. The statistics are recorded per space.
#[derive(Copy, Clone, Debug)]
pub struct LiveBytesStats {
    /// Total accumulated bytes of live objects in the space.
    pub live_bytes: usize,
    /// Total pages used by the space.
    pub used_pages: usize,
    /// Total bytes used by the space, computed from `used_pages`.
    /// The ratio of `live_bytes` to `used_bytes` reflects the utilization of the memory in the space.
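    ///
    /// For example, a rough utilization estimate could be computed as follows
    /// (`stats` is an illustrative `LiveBytesStats` value):
    ///
    /// ```ignore
    /// let utilization = stats.live_bytes as f64 / stats.used_bytes as f64;
    /// ```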
    pub used_bytes: usize,
}