mmtk/policy/immix/
defrag.rs

use super::{
    block::{Block, BlockState},
    line::Line,
    ImmixSpace,
};
use crate::util::linear_scan::Region;
use crate::{policy::space::Space, Plan};
use crate::{util::constants::LOG_BYTES_IN_PAGE, vm::*};
use spin::Mutex;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};

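/// A histogram of line counts indexed by bucket, where a bucket is the number of holes in a
/// block.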
pub type Histogram = [usize; Defrag::NUM_BINS];

#[derive(Debug, Default)]
pub struct Defrag {
    /// Is the current GC a defrag GC?
    in_defrag_collection: AtomicBool,
    /// Is the defrag space exhausted?
    defrag_space_exhausted: AtomicBool,
    /// A list of completed mark histograms reported by workers.
    pub mark_histograms: Mutex<Vec<Histogram>>,
    /// A block with a number of holes greater than this threshold will be defragmented.
    pub defrag_spill_threshold: AtomicUsize,
    /// The number of remaining clean pages in the defrag space.
    available_clean_pages_for_defrag: AtomicUsize,
}

/// A snapshot of the plan's page usage statistics, used by [`Defrag::prepare`] to size the
/// defragmentation budget.
pub struct StatsForDefrag {
    total_pages: usize,
    reserved_pages: usize,
    collection_reserved_pages: usize,
}

impl StatsForDefrag {
    pub fn new<VM: VMBinding>(plan: &dyn Plan<VM = VM>) -> Self {
        Self {
            total_pages: plan.get_total_pages(),
            reserved_pages: plan.get_reserved_pages(),
            collection_reserved_pages: plan.get_collection_reserved_pages(),
        }
    }
}

impl Defrag {
    /// Number of histogram bins: one for each possible hole count. A block of `Block::LINES`
    /// lines can have at most `Block::LINES / 2` holes.
    const NUM_BINS: usize = (Block::LINES >> 1) + 1;
    /// Scaling factor applied to the available lines when computing the evacuation budget in
    /// `establish_defrag_spill_threshold`.
    const DEFRAG_LINE_REUSE_RATIO: f32 = 0.99;
    /// The lowest spill threshold considered; the computed threshold never falls below this.
    const MIN_SPILL_THRESHOLD: usize = 2;

    /// Allocate a new local histogram.
    pub const fn new_histogram(&self) -> Histogram {
        [0; Self::NUM_BINS]
    }

    /// Report back a completed mark histogram
    pub fn add_completed_mark_histogram(&self, histogram: Histogram) {
        self.mark_histograms.lock().push(histogram)
    }

    /// Check if the current GC is a defrag GC.
    pub fn in_defrag(&self) -> bool {
        self.in_defrag_collection.load(Ordering::Acquire)
    }

    /// Determine whether the current GC should do defragmentation.
    #[allow(clippy::too_many_arguments)]
    pub fn decide_whether_to_defrag(
        &self,
        defrag_enabled: bool,
        emergency_collection: bool,
        collect_whole_heap: bool,
        collection_attempts: usize,
        user_triggered: bool,
        exhausted_reusable_space: bool,
        full_heap_system_gc: bool,
        stress_defrag: bool,
    ) {
        let in_defrag = defrag_enabled
            && (emergency_collection
                || (collection_attempts > 1)
                || !exhausted_reusable_space
                || stress_defrag
                || (collect_whole_heap && user_triggered && full_heap_system_gc));
        info!("Defrag: {}", in_defrag);
        probe!(mmtk, immix_defrag, in_defrag);
        self.in_defrag_collection
            .store(in_defrag, Ordering::Release)
    }

    /// Get the number of defrag headroom pages.
    pub fn defrag_headroom_pages<VM: VMBinding>(&self, space: &ImmixSpace<VM>) -> usize {
        space.get_page_resource().reserved_pages()
            * (*space.common().options.immix_defrag_headroom_percent)
            / 100
    }
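
    // Illustrative example (hypothetical numbers, not the option's default): with 10_000
    // reserved pages and `immix_defrag_headroom_percent` set to 2, the headroom above is
    // 10_000 * 2 / 100 = 200 pages.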

    /// Check if the defrag space is exhausted.
    pub fn space_exhausted(&self) -> bool {
        self.defrag_space_exhausted.load(Ordering::Acquire)
    }

    /// Update the `available_clean_pages_for_defrag` counter when a clean block is allocated.
    /// Only blocks allocated for copying (`copy == true`) consume the defrag budget.
    pub fn notify_new_clean_block(&self, copy: bool) {
        if copy {
            // Atomically deduct one block's worth of pages from the budget, saturating at zero.
            let available_clean_pages_for_defrag =
                self.available_clean_pages_for_defrag.fetch_update(
                    Ordering::SeqCst,
                    Ordering::SeqCst,
                    |available_clean_pages_for_defrag| {
                        if available_clean_pages_for_defrag <= Block::PAGES {
                            Some(0)
                        } else {
                            Some(available_clean_pages_for_defrag - Block::PAGES)
                        }
                    },
                );
            // The closure always returns `Some`, so `fetch_update` always succeeds and returns
            // the previous value. If that value was at most one block's worth of pages, the
            // defrag budget is now exhausted.
            if available_clean_pages_for_defrag.unwrap() <= Block::PAGES {
                self.defrag_space_exhausted.store(true, Ordering::SeqCst);
            }
        }
    }

    /// Prepare for a GC. Should be called in `ImmixSpace::prepare`.
    pub fn prepare<VM: VMBinding>(&self, space: &ImmixSpace<VM>, plan_stats: StatsForDefrag) {
        debug_assert!(space.is_defrag_enabled());
        self.defrag_space_exhausted.store(false, Ordering::Release);

        // Calculate the free space available for defragmentation.
        let mut available_clean_pages_for_defrag = plan_stats.total_pages as isize
            - plan_stats.reserved_pages as isize
            + self.defrag_headroom_pages(space) as isize;
        if available_clean_pages_for_defrag < 0 {
            available_clean_pages_for_defrag = 0
        };

        self.available_clean_pages_for_defrag
            .store(available_clean_pages_for_defrag as usize, Ordering::Release);

        if self.in_defrag() {
            self.establish_defrag_spill_threshold(space)
        }

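        // Note: the spill threshold above is established against a budget that excludes the
        // collection copy reserve. The copy reserve (`collection_reserved_pages`) is then added
        // to the budget below, so defrag copying may also draw on those pages.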
        self.available_clean_pages_for_defrag.store(
            available_clean_pages_for_defrag as usize + plan_stats.collection_reserved_pages,
            Ordering::Release,
        );
    }

    /// Get the number of recyclable lines in all the reusable blocks, and record the per-bucket
    /// (holes-per-block) available-line counts in `spill_avail_histograms`.
    fn get_available_lines<VM: VMBinding>(
        &self,
        space: &ImmixSpace<VM>,
        spill_avail_histograms: &mut Histogram,
    ) -> usize {
        let mut total_available_lines = 0;
        space.reusable_blocks.iterate_blocks(|block| {
            let bucket = block.get_holes();
            let unavailable_lines = match block.get_state() {
                BlockState::Reusable { unavailable_lines } => unavailable_lines as usize,
                s => unreachable!("{:?} {:?}", block, s),
            };
            let available_lines = Block::LINES - unavailable_lines;
            spill_avail_histograms[bucket] += available_lines;
            total_available_lines += available_lines;
        });
        total_available_lines
    }

    /// Calculate the defrag spill threshold.
    fn establish_defrag_spill_threshold<VM: VMBinding>(&self, space: &ImmixSpace<VM>) {
        let mut spill_avail_histograms = self.new_histogram();
        let clean_lines = self.get_available_lines(space, &mut spill_avail_histograms);
        // Total free lines: recyclable lines plus the clean-page budget converted to lines.
        let available_lines = clean_lines
            + (self
                .available_clean_pages_for_defrag
                .load(Ordering::Acquire)
                << (LOG_BYTES_IN_PAGE as usize - Line::LOG_BYTES));

        // Number of lines we will evacuate.
        let mut required_lines = 0isize;
        // Number of to-space free lines we can use for defragmentation.
        let mut limit = (available_lines as f32 / Self::DEFRAG_LINE_REUSE_RATIO) as isize;
        let mut threshold = Block::LINES >> 1;
        let mark_histograms = self.mark_histograms.lock();
        // Blocks are grouped into buckets, indexed by the number of holes in each block.
        // `mark_histograms` records the number of live lines for each bucket. Iterate over the
        // buckets in reverse to find a threshold such that all buckets above it can be
        // evacuated without causing to-space overflow.
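        //
        // Illustrative walk-through with made-up numbers (not taken from a real heap): suppose
        // `limit` starts at 20 lines and, summed over all workers, the histograms are
        //     bucket (holes):      4    3    2
        //     marked lines:        6    8   10
        //     available lines:     2    4    8
        // Bucket 4: limit = 20 - 2 = 18, required = 6  -> continue.
        // Bucket 3: limit = 18 - 4 = 14, required = 14 -> continue.
        // Bucket 2: limit = 14 - 8 = 6,  required = 24 -> overflow, stop with threshold = 2.
        // Blocks with more than 2 holes are then treated as defrag candidates.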
        for index in (Self::MIN_SPILL_THRESHOLD..Self::NUM_BINS).rev() {
            threshold = index;
            // Calculate the total number of live (marked) lines in this bucket.
            let this_bucket_mark = mark_histograms
                .iter()
                .map(|v| v[threshold] as isize)
                .sum::<isize>();
            // Calculate the number of free lines in this bucket.
            let this_bucket_avail = spill_avail_histograms[threshold] as isize;
            // Update the counters.
            limit -= this_bucket_avail;
            required_lines += this_bucket_mark;
            // Stop scanning: the lines to evacuate would exceed the free to-space lines.
            if limit < required_lines {
                break;
            }
        }
        debug_assert!(threshold >= Self::MIN_SPILL_THRESHOLD);
        self.defrag_spill_threshold
            .store(threshold, Ordering::Release);
    }

    /// Reset the in-defrag state.
    pub fn reset_in_defrag(&self) {
        self.in_defrag_collection.store(false, Ordering::Release);
    }
}
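
// Sketch of how a plan is expected to drive `Defrag` over a collection (illustrative; the
// exact call sites live in the Immix plan and `ImmixSpace`, not in this file):
//
// 1. When a GC is scheduled, `decide_whether_to_defrag` decides whether this GC defragments.
// 2. `ImmixSpace::prepare` builds a `StatsForDefrag` from the plan and calls `Defrag::prepare`,
//    which resets the exhaustion flag, sizes the defrag budget, and, for a defrag GC,
//    establishes the spill threshold from the recorded mark histograms.
// 3. During the GC, workers report mark histograms via `new_histogram` /
//    `add_completed_mark_histogram`, and every clean block acquired for copying is reported
//    through `notify_new_clean_block`, which flags exhaustion when the budget runs out.
// 4. `reset_in_defrag` clears the in-defrag state once the collection no longer needs it.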