use atomic_refcell::AtomicRefCell;
use log::trace;
use std::collections::HashMap;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::Mutex;
use std::time::Instant;

/// Global state shared across the GC and allocation subsystems; every field is an atomic or lock-guarded so the struct can be shared between threads.
pub struct GlobalState {
    /// Whether initialization has finished.
    pub(crate) initialized: AtomicBool,
    /// The current GC status.
    pub(crate) gc_status: Mutex<GcStatus>,
    /// The time at which the current GC started, if a GC is in progress.
    pub(crate) gc_start_time: AtomicRefCell<Option<Instant>>,
    /// Whether the current collection is an emergency collection.
    pub(crate) emergency_collection: AtomicBool,
    /// Whether the current collection was triggered by the user.
    pub(crate) user_triggered_collection: AtomicBool,
    /// Whether the current collection was triggered internally.
    pub(crate) internal_triggered_collection: AtomicBool,
    /// Whether the previous collection was triggered internally.
    pub(crate) last_internal_triggered_collection: AtomicBool,
    /// Whether an allocation has succeeded since the last collection attempt.
    pub(crate) allocation_success: AtomicBool,
    /// Upper bound on collection attempts; grows while allocation keeps failing and resets once an allocation succeeds.
    pub(crate) max_collection_attempts: AtomicUsize,
    /// The number of attempts recorded for the current collection.
    pub(crate) cur_collection_attempts: AtomicUsize,
    /// The number of mutator stacks scanned so far in the current collection.
    pub(crate) scanned_stacks: AtomicUsize,
    /// Whether all mutator stacks have been scanned.
    pub(crate) stacks_prepared: AtomicBool,
    /// Bytes allocated since the counter was last reset (used for stress GC).
    pub(crate) allocation_bytes: AtomicUsize,
    /// Bytes currently allocated through counted malloc.
    #[cfg(feature = "malloc_counted_size")]
    pub(crate) malloc_bytes: AtomicUsize,
    /// Per-space live-bytes statistics recorded for the last GC, keyed by space name.
    pub(crate) live_bytes_in_last_gc: AtomicRefCell<HashMap<&'static str, LiveBytesStats>>,
}

impl GlobalState {
    /// Return true once initialization has completed.
    pub fn is_initialized(&self) -> bool {
        self.initialized.load(Ordering::SeqCst)
    }

    /// Decide the kind of the upcoming collection: record the number of collection
    /// attempts and work out whether this must be an emergency collection.
    /// Returns true if the collection is an emergency collection.
    pub fn set_collection_kind(
        &self,
        last_collection_was_exhaustive: bool,
        heap_can_grow: bool,
    ) -> bool {
        self.cur_collection_attempts.store(
            if self.user_triggered_collection.load(Ordering::Relaxed) {
                1
            } else {
                self.determine_collection_attempts()
            },
            Ordering::Relaxed,
        );

        // A collection is an emergency collection when the heap cannot grow, the last
        // collection was already exhaustive, and we have made more than one attempt.
        let emergency_collection = !self.is_internal_triggered_collection()
            && last_collection_was_exhaustive
            && self.cur_collection_attempts.load(Ordering::Relaxed) > 1
            && !heap_can_grow;
        self.emergency_collection
            .store(emergency_collection, Ordering::Relaxed);

        emergency_collection
    }

    /// Return the number of collection attempts to record for this collection:
    /// the count keeps growing while allocation keeps failing, and resets to 1
    /// once an allocation has succeeded.
    fn determine_collection_attempts(&self) -> usize {
        if !self.allocation_success.load(Ordering::Relaxed) {
            self.max_collection_attempts.fetch_add(1, Ordering::Relaxed);
        } else {
            self.allocation_success.store(false, Ordering::Relaxed);
            self.max_collection_attempts.store(1, Ordering::Relaxed);
        }
        self.max_collection_attempts.load(Ordering::Relaxed)
    }

    /// Return whether the previous collection was internally triggered. With no
    /// concurrent GC implemented, this is currently expected to always be false.
    fn is_internal_triggered_collection(&self) -> bool {
        let is_internal_triggered = self
            .last_internal_triggered_collection
            .load(Ordering::SeqCst);
        assert!(
            !is_internal_triggered,
            "We have no concurrent GC implemented. We should not have an internally triggered GC"
        );
        is_internal_triggered
    }

    /// Return whether the current collection is an emergency collection.
    pub fn is_emergency_collection(&self) -> bool {
        self.emergency_collection.load(Ordering::Relaxed)
    }

    /// Return whether the current collection was requested by the user.
    pub fn is_user_triggered_collection(&self) -> bool {
        self.user_triggered_collection.load(Ordering::Relaxed)
    }

    /// Reset the collection trigger flags at the end of a collection, remembering
    /// whether the collection that just finished was internally triggered.
    pub fn reset_collection_trigger(&self) {
        self.last_internal_triggered_collection.store(
            self.internal_triggered_collection.load(Ordering::SeqCst),
            Ordering::Relaxed,
        );
        self.internal_triggered_collection
            .store(false, Ordering::SeqCst);
        self.user_triggered_collection
            .store(false, Ordering::Relaxed);
    }

    /// Return true if all mutator stacks have been scanned for the current collection.
    pub fn stacks_prepared(&self) -> bool {
        self.stacks_prepared.load(Ordering::SeqCst)
    }

    /// Reset the stack-scanning state before mutator stacks are scanned.
    pub fn prepare_for_stack_scanning(&self) {
        self.scanned_stacks.store(0, Ordering::SeqCst);
        self.stacks_prepared.store(false, Ordering::SeqCst);
    }

    /// Record that one more mutator stack has been scanned. Returns true exactly once,
    /// when the last of the `n_mutators` stacks has been scanned.
    pub fn inform_stack_scanned(&self, n_mutators: usize) -> bool {
        let old = self.scanned_stacks.fetch_add(1, Ordering::SeqCst);
        debug_assert!(
            old < n_mutators,
            "The number of scanned stacks ({}) is not less than the number of mutators ({})",
            old,
            n_mutators
        );
        let scanning_done = old + 1 == n_mutators;
        if scanning_done {
            self.stacks_prepared.store(true, Ordering::SeqCst);
        }
        scanning_done
    }

    /// Add `size` to the stress-GC allocation counter and return the new total.
    pub fn increase_allocation_bytes_by(&self, size: usize) -> usize {
        let old_allocation_bytes = self.allocation_bytes.fetch_add(size, Ordering::SeqCst);
        trace!(
            "Stress GC: old_allocation_bytes = {}, size = {}, allocation_bytes = {}",
            old_allocation_bytes,
            size,
            self.allocation_bytes.load(Ordering::Relaxed),
        );
        old_allocation_bytes + size
    }

    /// Return the number of counted-malloc bytes, converted to pages (rounded up).
    #[cfg(feature = "malloc_counted_size")]
    pub fn get_malloc_bytes_in_pages(&self) -> usize {
        crate::util::conversions::bytes_to_pages_up(self.malloc_bytes.load(Ordering::Relaxed))
    }

    /// Increase the counted-malloc byte counter by `size`.
    #[cfg(feature = "malloc_counted_size")]
    pub(crate) fn increase_malloc_bytes_by(&self, size: usize) {
        self.malloc_bytes.fetch_add(size, Ordering::SeqCst);
    }

    /// Decrease the counted-malloc byte counter by `size`.
    #[cfg(feature = "malloc_counted_size")]
    pub(crate) fn decrease_malloc_bytes_by(&self, size: usize) {
        self.malloc_bytes.fetch_sub(size, Ordering::SeqCst);
    }
}

impl Default for GlobalState {
    fn default() -> Self {
        Self {
            initialized: AtomicBool::new(false),
            gc_status: Mutex::new(GcStatus::NotInGC),
            gc_start_time: AtomicRefCell::new(None),
            stacks_prepared: AtomicBool::new(false),
            emergency_collection: AtomicBool::new(false),
            user_triggered_collection: AtomicBool::new(false),
            internal_triggered_collection: AtomicBool::new(false),
            last_internal_triggered_collection: AtomicBool::new(false),
            allocation_success: AtomicBool::new(false),
            max_collection_attempts: AtomicUsize::new(0),
            cur_collection_attempts: AtomicUsize::new(0),
            scanned_stacks: AtomicUsize::new(0),
            allocation_bytes: AtomicUsize::new(0),
            #[cfg(feature = "malloc_counted_size")]
            malloc_bytes: AtomicUsize::new(0),
            live_bytes_in_last_gc: AtomicRefCell::new(HashMap::new()),
        }
    }
}

/// The status of a GC cycle.
#[derive(PartialEq)]
pub enum GcStatus {
    /// No GC is in progress.
    NotInGC,
    /// A GC has been requested and is being prepared.
    GcPrepare,
    /// The GC proper is underway.
    GcProper,
}

/// Live-bytes statistics for a single space, recorded for the last GC.
#[derive(Copy, Clone, Debug)]
pub struct LiveBytesStats {
    /// The number of live bytes in the space.
    pub live_bytes: usize,
    /// The number of pages used by the space.
    pub used_pages: usize,
    /// The number of bytes used by the space.
    pub used_bytes: usize,
}