// mmtk/policy/immix/defrag.rs
use super::{
    block::{Block, BlockState},
    line::Line,
    ImmixSpace,
};
use crate::util::linear_scan::Region;
use crate::{policy::space::Space, Plan};
use crate::{util::constants::LOG_BYTES_IN_PAGE, vm::*};
use spin::Mutex;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};

pub type Histogram = [usize; Defrag::NUM_BINS];

#[derive(Debug, Default)]
pub struct Defrag {
    /// Whether the current GC is a defrag GC.
    in_defrag_collection: AtomicBool,
    /// Whether the space reserved for defrag copying has been exhausted.
    defrag_space_exhausted: AtomicBool,
    /// Completed mark histograms reported by GC workers.
    pub mark_histograms: Mutex<Vec<Histogram>>,
    /// Blocks with more holes than this threshold are selected for defragmentation.
    pub defrag_spill_threshold: AtomicUsize,
    /// The number of clean pages still available for defrag copying.
    available_clean_pages_for_defrag: AtomicUsize,
}

/// Heap usage statistics sampled from the plan, used to size the defrag copy reserve.
pub struct StatsForDefrag {
    total_pages: usize,
    reserved_pages: usize,
    collection_reserved_pages: usize,
}

impl StatsForDefrag {
    pub fn new<VM: VMBinding>(plan: &dyn Plan<VM = VM>) -> Self {
        Self {
            total_pages: plan.get_total_pages(),
            reserved_pages: plan.get_reserved_pages(),
            collection_reserved_pages: plan.get_collection_reserved_pages(),
        }
    }
}

impl Defrag {
    const NUM_BINS: usize = (Block::LINES >> 1) + 1;
    const DEFRAG_LINE_REUSE_RATIO: f32 = 0.99;
    const MIN_SPILL_THRESHOLD: usize = 2;

    /// Allocate a new, zeroed mark histogram.
    pub const fn new_histogram(&self) -> Histogram {
        [0; Self::NUM_BINS]
    }

    /// Report a completed mark histogram from a GC worker.
    pub fn add_completed_mark_histogram(&self, histogram: Histogram) {
        self.mark_histograms.lock().push(histogram)
    }

    /// Check whether the current GC is a defrag GC.
    pub fn in_defrag(&self) -> bool {
        self.in_defrag_collection.load(Ordering::Acquire)
    }

    /// Decide whether the upcoming GC should defragment, based on the collection
    /// trigger and heap state, and record the decision.
    #[allow(clippy::too_many_arguments)]
    pub fn decide_whether_to_defrag(
        &self,
        defrag_enabled: bool,
        emergency_collection: bool,
        collect_whole_heap: bool,
        collection_attempts: usize,
        user_triggered: bool,
        exhausted_reusable_space: bool,
        full_heap_system_gc: bool,
        stress_defrag: bool,
    ) {
        let in_defrag = defrag_enabled
            && (emergency_collection
                || (collection_attempts > 1)
                || !exhausted_reusable_space
                || stress_defrag
                || (collect_whole_heap && user_triggered && full_heap_system_gc));
        info!("Defrag: {}", in_defrag);
        probe!(mmtk, immix_defrag, in_defrag);
        self.in_defrag_collection
            .store(in_defrag, Ordering::Release)
    }

    /// The number of pages to reserve as defrag copy headroom, derived from the
    /// `immix_defrag_headroom_percent` option.
    pub fn defrag_headroom_pages<VM: VMBinding>(&self, space: &ImmixSpace<VM>) -> usize {
        space.get_page_resource().reserved_pages()
            * (*space.common().options.immix_defrag_headroom_percent)
            / 100
    }

    /// Check whether the defrag copy reserve has been exhausted.
    pub fn space_exhausted(&self) -> bool {
        self.defrag_space_exhausted.load(Ordering::Acquire)
    }

    /// Notify that a clean block has been allocated. If the block is used for
    /// defrag copying, deduct it from the remaining copy reserve and flag the
    /// reserve as exhausted once it runs out.
    pub fn notify_new_clean_block(&self, copy: bool) {
        if copy {
            let available_clean_pages_for_defrag =
                self.available_clean_pages_for_defrag.fetch_update(
                    Ordering::SeqCst,
                    Ordering::SeqCst,
                    |available_clean_pages_for_defrag| {
                        if available_clean_pages_for_defrag <= Block::PAGES {
                            Some(0)
                        } else {
                            Some(available_clean_pages_for_defrag - Block::PAGES)
                        }
                    },
                );
            // `fetch_update` returns the previous value; if that was already at or
            // below one block's worth of pages, the copy reserve is now exhausted.
            if available_clean_pages_for_defrag.unwrap() <= Block::PAGES {
                self.defrag_space_exhausted.store(true, Ordering::SeqCst);
            }
        }
    }

    /// Prepare for a GC. Should be called during `ImmixSpace::prepare`.
    pub fn prepare<VM: VMBinding>(&self, space: &ImmixSpace<VM>, plan_stats: StatsForDefrag) {
        debug_assert!(space.is_defrag_enabled());
        self.defrag_space_exhausted.store(false, Ordering::Release);

        // Estimate the clean pages available for defrag copying: the pages the
        // plan has not reserved, plus the defrag headroom.
        let mut available_clean_pages_for_defrag = plan_stats.total_pages as isize
            - plan_stats.reserved_pages as isize
            + self.defrag_headroom_pages(space) as isize;
        if available_clean_pages_for_defrag < 0 {
            available_clean_pages_for_defrag = 0
        };

        self.available_clean_pages_for_defrag
            .store(available_clean_pages_for_defrag as usize, Ordering::Release);

        if self.in_defrag() {
            self.establish_defrag_spill_threshold(space)
        }

        // Pages reserved for this collection can also be copied into.
        self.available_clean_pages_for_defrag.store(
            available_clean_pages_for_defrag as usize + plan_stats.collection_reserved_pages,
            Ordering::Release,
        );
    }

    /// Count the available (free) lines in all reusable blocks, recording the
    /// per-bucket totals in `spill_avail_histograms`, indexed by hole count.
    fn get_available_lines<VM: VMBinding>(
        &self,
        space: &ImmixSpace<VM>,
        spill_avail_histograms: &mut Histogram,
    ) -> usize {
        let mut total_available_lines = 0;
        space.reusable_blocks.iterate_blocks(|block| {
            let bucket = block.get_holes();
            let unavailable_lines = match block.get_state() {
                BlockState::Reusable { unavailable_lines } => unavailable_lines as usize,
                s => unreachable!("{:?} {:?}", block, s),
            };
            let available_lines = Block::LINES - unavailable_lines;
            spill_avail_histograms[bucket] += available_lines;
            total_available_lines += available_lines;
        });
        total_available_lines
    }

    /// Calculate the defrag spill threshold: walk the hole-count buckets from the
    /// most fragmented blocks downwards, and stop once the lines that would need
    /// to be evacuated exceed the free lines available to copy into.
    fn establish_defrag_spill_threshold<VM: VMBinding>(&self, space: &ImmixSpace<VM>) {
        let mut spill_avail_histograms = self.new_histogram();
        let clean_lines = self.get_available_lines(space, &mut spill_avail_histograms);
        let available_lines = clean_lines
            + (self
                .available_clean_pages_for_defrag
                .load(Ordering::Acquire)
                << (LOG_BYTES_IN_PAGE as usize - Line::LOG_BYTES));

        // Lines that would have to be evacuated so far.
        let mut required_lines = 0isize;
        // Budget of free lines to copy into, adjusted by the line reuse ratio.
        let mut limit = (available_lines as f32 / Self::DEFRAG_LINE_REUSE_RATIO) as isize;
        let mut threshold = Block::LINES >> 1;
        let mark_histograms = self.mark_histograms.lock();
        for index in (Self::MIN_SPILL_THRESHOLD..Self::NUM_BINS).rev() {
            threshold = index;
            // Total marked lines in blocks with `index` holes.
            let this_bucket_mark = mark_histograms
                .iter()
                .map(|v| v[threshold] as isize)
                .sum::<isize>();
            // Free lines in reusable blocks with `index` holes. These blocks become
            // defrag candidates, so their free lines are no longer copy targets.
            let this_bucket_avail = spill_avail_histograms[threshold] as isize;
            limit -= this_bucket_avail;
            required_lines += this_bucket_mark;
            // Stop once the evacuation demand would exceed the remaining budget.
            if limit < required_lines {
                break;
            }
        }
        debug_assert!(threshold >= Self::MIN_SPILL_THRESHOLD);
        self.defrag_spill_threshold
            .store(threshold, Ordering::Release);
    }

    /// Clear the in-defrag flag.
    pub fn reset_in_defrag(&self) {
        self.in_defrag_collection.store(false, Ordering::Release);
    }
}
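
// A minimal usage sketch of the defrag-decision lifecycle, exercising only the
// VM-independent parts of `Defrag` (no `ImmixSpace` is constructed). The module
// name, the argument values passed to `decide_whether_to_defrag`, and the
// scenario itself are illustrative assumptions, not taken from any plan.
#[cfg(test)]
mod defrag_flag_sketch {
    use super::*;

    #[test]
    fn defrag_decision_lifecycle() {
        let defrag = Defrag::default();
        assert!(!defrag.in_defrag());

        // An emergency collection with defrag enabled selects a defrag GC.
        defrag.decide_whether_to_defrag(
            true,  // defrag_enabled
            true,  // emergency_collection
            false, // collect_whole_heap
            0,     // collection_attempts
            false, // user_triggered
            true,  // exhausted_reusable_space
            false, // full_heap_system_gc
            false, // stress_defrag
        );
        assert!(defrag.in_defrag());

        // Workers report completed mark histograms back to the shared list.
        let histogram = defrag.new_histogram();
        defrag.add_completed_mark_histogram(histogram);
        assert_eq!(defrag.mark_histograms.lock().len(), 1);

        // The flag is cleared again once the GC finishes.
        defrag.reset_in_defrag();
        assert!(!defrag.in_defrag());
    }
}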