use atomic::Ordering;

use crate::global_state::GlobalState;
use crate::plan::Plan;
use crate::policy::space::Space;
use crate::scheduler::GCWorkScheduler;
use crate::util::constants::BYTES_IN_PAGE;
use crate::util::conversions;
use crate::util::options::{GCTriggerSelector, Options, DEFAULT_MAX_NURSERY, DEFAULT_MIN_NURSERY};
use crate::vm::Collection;
use crate::vm::VMBinding;
use crate::MMTK;
use std::mem::MaybeUninit;
use std::sync::atomic::{AtomicBool, AtomicUsize};
use std::sync::Arc;

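/// `GCTrigger` connects the GC trigger policy with the rest of MMTk: it decides, by consulting
/// its [`GCTriggerPolicy`], when a collection should happen, and asks the scheduler to schedule
/// one.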
pub struct GCTrigger<VM: VMBinding> {
    plan: MaybeUninit<&'static dyn Plan<VM = VM>>,
    pub policy: Box<dyn GCTriggerPolicy<VM>>,
    request_flag: AtomicBool,
    scheduler: Arc<GCWorkScheduler<VM>>,
    options: Arc<Options>,
    state: Arc<GlobalState>,
}

impl<VM: VMBinding> GCTrigger<VM> {
    pub fn new(
        options: Arc<Options>,
        scheduler: Arc<GCWorkScheduler<VM>>,
        state: Arc<GlobalState>,
    ) -> Self {
        GCTrigger {
            plan: MaybeUninit::uninit(),
            policy: match *options.gc_trigger {
                GCTriggerSelector::FixedHeapSize(size) => Box::new(FixedHeapSizeTrigger {
                    total_pages: conversions::bytes_to_pages_up(size),
                }),
                GCTriggerSelector::DynamicHeapSize(min, max) => 'dynamic_heap_size: {
                    let min_pages = conversions::bytes_to_pages_up(min);
                    let max_pages = conversions::bytes_to_pages_up(max);

                    if *options.plan == crate::util::options::PlanSelector::NoGC {
                        warn!("Cannot use dynamic heap size with NoGC. Using fixed heap size trigger instead.");
                        break 'dynamic_heap_size Box::new(FixedHeapSizeTrigger {
                            total_pages: max_pages,
                        });
                    }

                    Box::new(MemBalancerTrigger::new(min_pages, max_pages))
                }
                GCTriggerSelector::Delegated => {
                    <VM::VMCollection as crate::vm::Collection<VM>>::create_gc_trigger()
                }
            },
            options,
            request_flag: AtomicBool::new(false),
            scheduler,
            state,
        }
    }

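    /// Set the plan. This must be called exactly once, before the trigger is used, because the
    /// plan reference is stored in a `MaybeUninit` and later read with `assume_init`.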
    pub fn set_plan(&mut self, plan: &'static dyn Plan<VM = VM>) {
        self.plan.write(plan);
    }

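    /// Get the plan. This is only sound after `set_plan()` has been called.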
    fn plan(&self) -> &dyn Plan<VM = VM> {
        unsafe { self.plan.assume_init() }
    }

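    /// Request a GC. The atomic swap on `request_flag` ensures that only one caller asks the
    /// scheduler to schedule the collection, even if multiple threads poll at the same time.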
    fn request(&self) {
        if self.request_flag.load(Ordering::Relaxed) {
            return;
        }

        if !self.request_flag.swap(true, Ordering::Relaxed) {
            probe!(mmtk, gc_requested);
            self.scheduler.request_schedule_collection();
        }
    }

    pub fn clear_request(&self) {
        self.request_flag.store(false, Ordering::Relaxed);
    }

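    /// Check if a GC should be triggered, and request one if so. This is polled on slow-path
    /// allocations. `space_full` indicates that the polling space has run out of pages, and
    /// `space` is the space that initiated the poll, if any. Returns true if a GC was requested.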
    pub fn poll(&self, space_full: bool, space: Option<&dyn Space<VM>>) -> bool {
        if !VM::VMCollection::is_collection_enabled() {
            return false;
        }

        let plan = self.plan();
        if self
            .policy
            .is_gc_required(space_full, space.map(|s| SpaceStats::new(s)), plan)
        {
            info!(
                "[POLL] {}{} ({}/{} pages)",
                if let Some(space) = space {
                    format!("{}: ", space.get_name())
                } else {
                    "".to_string()
                },
                "Triggering collection",
                plan.get_reserved_pages(),
                plan.get_total_pages(),
            );
            self.request();
            return true;
        }
        false
    }

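    /// Handle a GC request from the user or the VM. `force` requests the collection regardless of
    /// the `ignore_system_gc` option and of whether collection is currently enabled, and
    /// `exhaustive` forces a full-heap collection for generational plans. Returns true if a
    /// collection was requested.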
    pub fn handle_user_collection_request(&self, force: bool, exhaustive: bool) -> bool {
        if !self.plan().constraints().collects_garbage {
            warn!("User attempted a collection request, but the plan cannot do GC. The request is ignored.");
            return false;
        }

        if force || !*self.options.ignore_system_gc && VM::VMCollection::is_collection_enabled() {
            info!("User triggering collection");
            if exhaustive {
                if let Some(gen) = self.plan().generational() {
                    gen.force_full_heap_collection();
                }
            }

            self.state
                .user_triggered_collection
                .store(true, Ordering::Relaxed);
            self.request();
            return true;
        }

        false
    }

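    /// Trigger a collection requested internally by MMTk rather than by the user. This sets the
    /// internal-trigger flags in the global state; the rest of the path is currently
    /// unimplemented (the function panics after requesting the GC).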
    #[allow(unused)]
    pub fn trigger_internal_collection_request(&self) {
        self.state
            .last_internal_triggered_collection
            .store(true, Ordering::Relaxed);
        self.state
            .internal_triggered_collection
            .store(true, Ordering::Relaxed);
        self.request();
        unimplemented!()
    }

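    /// Check whether a stress GC should be performed, i.e. whether the bytes allocated (as
    /// tracked in the global state) exceed the `stress_factor` option.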
    pub fn should_do_stress_gc(&self) -> bool {
        Self::should_do_stress_gc_inner(&self.state, &self.options)
    }

    pub(crate) fn should_do_stress_gc_inner(state: &GlobalState, options: &Options) -> bool {
        state.is_initialized()
            && (state.allocation_bytes.load(Ordering::SeqCst) > *options.stress_factor)
    }

    pub fn is_heap_full(&self) -> bool {
        self.policy.is_heap_full(self.plan())
    }

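    /// Get the upper bound of the nursery size in bytes, depending on the `nursery` option. For a
    /// proportional nursery, the bound is derived from the current heap size and capped at
    /// `DEFAULT_MAX_NURSERY`.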
    pub fn get_max_nursery_bytes(&self) -> usize {
        use crate::util::options::NurserySize;
        debug_assert!(self.plan().generational().is_some());
        match *self.options.nursery {
            NurserySize::Bounded { min: _, max } => max,
            NurserySize::ProportionalBounded { min: _, max } => {
                let heap_size_bytes =
                    conversions::pages_to_bytes(self.policy.get_current_heap_size_in_pages());
                let max_bytes = heap_size_bytes as f64 * max;
                let max_bytes = conversions::raw_align_up(max_bytes as usize, BYTES_IN_PAGE);
                if max_bytes > DEFAULT_MAX_NURSERY {
                    warn!("Proportional nursery with max size {} ({}) is larger than DEFAULT_MAX_NURSERY ({}). Using DEFAULT_MAX_NURSERY instead.", max, max_bytes, DEFAULT_MAX_NURSERY);
                    DEFAULT_MAX_NURSERY
                } else {
                    max_bytes
                }
            }
            NurserySize::Fixed(sz) => sz,
        }
    }

    /// Get the lower bound of the nursery size in bytes, depending on the `nursery` option.
    pub fn get_min_nursery_bytes(&self) -> usize {
        use crate::util::options::NurserySize;
        debug_assert!(self.plan().generational().is_some());
        match *self.options.nursery {
            NurserySize::Bounded { min, max: _ } => min,
            NurserySize::ProportionalBounded { min, max: _ } => {
                let min_bytes =
                    conversions::pages_to_bytes(self.policy.get_current_heap_size_in_pages())
                        as f64
                        * min;
                let min_bytes = conversions::raw_align_up(min_bytes as usize, BYTES_IN_PAGE);
                if min_bytes < DEFAULT_MIN_NURSERY {
                    warn!("Proportional nursery with min size {} ({}) is smaller than DEFAULT_MIN_NURSERY ({}). Using DEFAULT_MIN_NURSERY instead.", min, min_bytes, DEFAULT_MIN_NURSERY);
                    DEFAULT_MIN_NURSERY
                } else {
                    min_bytes
                }
            }
            NurserySize::Fixed(sz) => sz,
        }
    }

    /// Get the upper bound of the nursery size in whole pages.
    pub fn get_max_nursery_pages(&self) -> usize {
        crate::util::conversions::bytes_to_pages_up(self.get_max_nursery_bytes())
    }

    /// Get the lower bound of the nursery size in whole pages.
    pub fn get_min_nursery_pages(&self) -> usize {
        crate::util::conversions::bytes_to_pages_up(self.get_min_nursery_bytes())
    }
}

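/// Statistics about a space, as seen by a GC trigger policy. It wraps a reference to the space
/// and exposes only the stats a policy needs (currently the reserved pages).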
pub struct SpaceStats<'a, VM: VMBinding>(pub(crate) &'a dyn Space<VM>);

impl<'a, VM: VMBinding> SpaceStats<'a, VM> {
    fn new(space: &'a dyn Space<VM>) -> Self {
        Self(space)
    }

    pub fn reserved_pages(&self) -> usize {
        self.0.reserved_pages()
    }
}

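/// A GC trigger policy decides when a GC should be triggered and what the current and maximum
/// heap sizes are. MMTk provides [`FixedHeapSizeTrigger`] and [`MemBalancerTrigger`]; a binding
/// can supply its own policy via `GCTriggerSelector::Delegated`.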
pub trait GCTriggerPolicy<VM: VMBinding>: Sync + Send {
    /// Inform the policy that an allocation of the given number of pages is pending; policies
    /// with a dynamic heap size should account for it when resizing the heap.
    fn on_pending_allocation(&self, _pages: usize) {}
    /// Called when a GC starts.
    fn on_gc_start(&self, _mmtk: &'static MMTK<VM>) {}
    /// Called in the release phase of a GC.
    fn on_gc_release(&self, _mmtk: &'static MMTK<VM>) {}
    /// Called when a GC ends.
    fn on_gc_end(&self, _mmtk: &'static MMTK<VM>) {}
    /// Check if a GC should be triggered for the given plan and, optionally, the space that
    /// initiated the poll.
    fn is_gc_required(
        &self,
        space_full: bool,
        space: Option<SpaceStats<VM>>,
        plan: &dyn Plan<VM = VM>,
    ) -> bool;
    /// Check if the heap is full, i.e. a GC is needed based on the current heap usage.
    fn is_heap_full(&self, plan: &dyn Plan<VM = VM>) -> bool;
    /// The current heap limit in pages.
    fn get_current_heap_size_in_pages(&self) -> usize;
    /// The maximum heap limit in pages.
    fn get_max_heap_size_in_pages(&self) -> usize;
    /// Whether the current heap limit can still grow.
    fn can_heap_size_grow(&self) -> bool;
}

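/// A GC trigger policy for a fixed heap size: a GC is triggered whenever the plan requires a
/// collection, and the heap is considered full once the reserved pages exceed the fixed total.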
pub struct FixedHeapSizeTrigger {
    total_pages: usize,
}
impl<VM: VMBinding> GCTriggerPolicy<VM> for FixedHeapSizeTrigger {
    fn is_gc_required(
        &self,
        space_full: bool,
        space: Option<SpaceStats<VM>>,
        plan: &dyn Plan<VM = VM>,
    ) -> bool {
        plan.collection_required(space_full, space)
    }

    fn is_heap_full(&self, plan: &dyn Plan<VM = VM>) -> bool {
        plan.get_reserved_pages() > self.total_pages
    }

    fn get_current_heap_size_in_pages(&self) -> usize {
        self.total_pages
    }

    fn get_max_heap_size_in_pages(&self) -> usize {
        self.total_pages
    }

    fn can_heap_size_grow(&self) -> bool {
        false
    }
}

use atomic_refcell::AtomicRefCell;
use std::time::Instant;

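/// A dynamic heap-size policy based on the MemBalancer approach: after each (full-heap) GC it
/// measures allocation and collection rates, and resizes the heap so that the headroom above the
/// live data balances allocation throughput against collection cost, clamped to the configured
/// minimum and maximum heap sizes.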
pub struct MemBalancerTrigger {
    /// The lower bound of the heap size, in pages.
    min_heap_pages: usize,
    /// The upper bound of the heap size, in pages.
    max_heap_pages: usize,
    /// The current heap limit in pages, recomputed after each (full-heap) GC.
    current_heap_pages: AtomicUsize,
    /// Pages needed by pending allocations, accounted for when computing the new heap limit.
    pending_pages: AtomicUsize,
    /// Statistics gathered across GCs, used to compute the new heap limit.
    stats: AtomicRefCell<MemBalancerStats>,
}

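/// Statistics gathered across GC cycles for the mem-balancer heuristic: pages allocated and
/// mutator time between GCs, and pages processed and collector time during GCs. For generational
/// plans, the page counts are only updated (and a new heap limit only computed) for full-heap
/// GCs.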
#[derive(Copy, Clone, Debug)]
struct MemBalancerStats {
    // Measurements from the previous GC (pages and seconds), used to smooth the current
    // measurements. `None` before the first GC.
    allocation_pages_prev: Option<f64>,
    allocation_time_prev: Option<f64>,
    collection_pages_prev: Option<f64>,
    collection_time_prev: Option<f64>,

    // Measurements for the current GC cycle: pages allocated and mutator seconds between GCs,
    // and pages processed and collector seconds during GCs.
    allocation_pages: f64,
    allocation_time: f64,
    collection_pages: f64,
    collection_time: f64,

    // Timestamps of the last GC start and end, used to attribute elapsed time to the mutators
    // (allocation) and to the collector respectively.
    gc_start_time: Instant,
    gc_end_time: Instant,

    // Live (reserved) pages measured at the GC release phase and at the end of the last GC.
    gc_release_live_pages: usize,
    gc_end_live_pages: usize,
}

impl std::default::Default for MemBalancerStats {
    fn default() -> Self {
        let now = Instant::now();
        Self {
            allocation_pages_prev: None,
            allocation_time_prev: None,
            collection_pages_prev: None,
            collection_time_prev: None,
            allocation_pages: 0f64,
            allocation_time: 0f64,
            collection_pages: 0f64,
            collection_time: 0f64,
            gc_start_time: now,
            gc_end_time: now,
            gc_release_live_pages: 0,
            gc_end_live_pages: 0,
        }
    }
}

use crate::plan::GenerationalPlan;

impl MemBalancerStats {
    fn generational_mem_stats_on_gc_start<VM: VMBinding>(
        &mut self,
        _plan: &dyn GenerationalPlan<VM = VM>,
    ) {
        // Nothing to measure at GC start for generational plans: mature-space "allocation"
        // (promotion) is measured at the release phase of full-heap GCs instead.
    }

    fn generational_mem_stats_on_gc_release<VM: VMBinding>(
        &mut self,
        plan: &dyn GenerationalPlan<VM = VM>,
    ) {
        if !plan.is_current_gc_nursery() {
            self.gc_release_live_pages = plan.get_mature_reserved_pages();

            // Pages promoted to the mature space since the end of the previous full-heap GC,
            // used as the "allocation" into the mature space.
            let promoted = self
                .gc_release_live_pages
                .saturating_sub(self.gc_end_live_pages);
            self.allocation_pages = promoted as f64;
            trace!(
                "promoted = mature live before release {} - mature live at prev gc end {} = {}",
                self.gc_release_live_pages,
                self.gc_end_live_pages,
                promoted
            );
            trace!(
                "allocated (promoted) pages = {}",
                self.allocation_pages
            );
        }
    }

    fn generational_mem_stats_on_gc_end<VM: VMBinding>(
        &mut self,
        plan: &dyn GenerationalPlan<VM = VM>,
    ) -> bool {
        if !plan.is_current_gc_nursery() {
            self.gc_end_live_pages = plan.get_mature_reserved_pages();
            // Use the mature live pages at GC end as an estimate of the pages this GC processed.
            self.collection_pages = self.gc_end_live_pages as f64;
            trace!(
                "collected pages = mature live at gc end = {}",
                self.collection_pages
            );
            true
        } else {
            false
        }
    }

    fn non_generational_mem_stats_on_gc_start<VM: VMBinding>(&mut self, mmtk: &'static MMTK<VM>) {
        self.allocation_pages = mmtk
            .get_plan()
            .get_reserved_pages()
            .saturating_sub(self.gc_end_live_pages) as f64;
        trace!(
            "allocated pages = used {} - live in last gc {} = {}",
            mmtk.get_plan().get_reserved_pages(),
            self.gc_end_live_pages,
            self.allocation_pages
        );
    }

    fn non_generational_mem_stats_on_gc_release<VM: VMBinding>(&mut self, mmtk: &'static MMTK<VM>) {
        self.gc_release_live_pages = mmtk.get_plan().get_reserved_pages();
        trace!("live before release = {}", self.gc_release_live_pages);
    }

    fn non_generational_mem_stats_on_gc_end<VM: VMBinding>(&mut self, mmtk: &'static MMTK<VM>) {
        self.gc_end_live_pages = mmtk.get_plan().get_reserved_pages();
        trace!("live pages = {}", self.gc_end_live_pages);
        // Use the live pages at GC end as an estimate of the pages this GC processed.
        self.collection_pages = self.gc_end_live_pages as f64;
        trace!(
            "collected pages = live at gc end = {}",
            self.collection_pages
        );
    }
}

impl<VM: VMBinding> GCTriggerPolicy<VM> for MemBalancerTrigger {
    fn is_gc_required(
        &self,
        space_full: bool,
        space: Option<SpaceStats<VM>>,
        plan: &dyn Plan<VM = VM>,
    ) -> bool {
        plan.collection_required(space_full, space)
    }

    fn on_pending_allocation(&self, pages: usize) {
        self.pending_pages.fetch_add(pages, Ordering::SeqCst);
    }

    fn on_gc_start(&self, mmtk: &'static MMTK<VM>) {
        trace!("=== on_gc_start ===");
        self.access_stats(|stats| {
            stats.gc_start_time = Instant::now();
            stats.allocation_time += (stats.gc_start_time - stats.gc_end_time).as_secs_f64();
            trace!(
                "gc_start = {:?}, allocation_time = {}",
                stats.gc_start_time,
                stats.allocation_time
            );

            if let Some(plan) = mmtk.get_plan().generational() {
                stats.generational_mem_stats_on_gc_start(plan);
            } else {
                stats.non_generational_mem_stats_on_gc_start(mmtk);
            }
        });
    }

    fn on_gc_release(&self, mmtk: &'static MMTK<VM>) {
        trace!("=== on_gc_release ===");
        self.access_stats(|stats| {
            if let Some(plan) = mmtk.get_plan().generational() {
                stats.generational_mem_stats_on_gc_release(plan);
            } else {
                stats.non_generational_mem_stats_on_gc_release(mmtk);
            }
        });
    }

    fn on_gc_end(&self, mmtk: &'static MMTK<VM>) {
        trace!("=== on_gc_end ===");
        self.access_stats(|stats| {
            stats.gc_end_time = Instant::now();
            stats.collection_time += (stats.gc_end_time - stats.gc_start_time).as_secs_f64();
            trace!(
                "gc_end = {:?}, collection_time = {}",
                stats.gc_end_time,
                stats.collection_time
            );

            if let Some(plan) = mmtk.get_plan().generational() {
                if stats.generational_mem_stats_on_gc_end(plan) {
                    self.compute_new_heap_limit(
                        mmtk.get_plan().get_reserved_pages(),
                        mmtk.get_plan().get_collection_reserved_pages()
                            + mmtk.gc_trigger.get_min_nursery_pages(),
                        stats,
                    );
                }
            } else {
                stats.non_generational_mem_stats_on_gc_end(mmtk);
                self.compute_new_heap_limit(
                    mmtk.get_plan().get_reserved_pages(),
                    mmtk.get_plan().get_collection_reserved_pages(),
                    stats,
                );
            }
        });
        self.pending_pages.store(0, Ordering::SeqCst);
    }

    fn is_heap_full(&self, plan: &dyn Plan<VM = VM>) -> bool {
        plan.get_reserved_pages() > self.current_heap_pages.load(Ordering::Relaxed)
    }

    fn get_current_heap_size_in_pages(&self) -> usize {
        self.current_heap_pages.load(Ordering::Relaxed)
    }

    fn get_max_heap_size_in_pages(&self) -> usize {
        self.max_heap_pages
    }

    fn can_heap_size_grow(&self) -> bool {
        self.current_heap_pages.load(Ordering::Relaxed) < self.max_heap_pages
    }
}

impl MemBalancerTrigger {
    fn new(min_heap_pages: usize, max_heap_pages: usize) -> Self {
        Self {
            min_heap_pages,
            max_heap_pages,
            pending_pages: AtomicUsize::new(0),
            current_heap_pages: AtomicUsize::new(min_heap_pages),
            stats: AtomicRefCell::new(Default::default()),
        }
    }

    fn access_stats<F>(&self, mut f: F)
    where
        F: FnMut(&mut MemBalancerStats),
    {
        let mut stats = self.stats.borrow_mut();
        f(&mut stats);
    }

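    /// Compute a new heap limit from the stats of the GC that just finished. `live` is the number
    /// of live (reserved) pages and `extra_reserve` is additional space to keep on top of the
    /// limit (e.g. the copy reserve and the minimum nursery). The new limit is roughly
    /// `live + sqrt(live * allocation_rate / (TUNING_FACTOR * collection_rate))`, plus the extra
    /// reserve and any pending allocation, clamped to the configured min/max heap size.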
    fn compute_new_heap_limit(
        &self,
        live: usize,
        extra_reserve: usize,
        stats: &mut MemBalancerStats,
    ) {
        trace!("compute new heap limit: {:?}", stats);

        const ALLOCATION_SMOOTH_FACTOR: f64 = 0.95;
        const COLLECTION_SMOOTH_FACTOR: f64 = 0.5;
        const TUNING_FACTOR: f64 = 0.2;

        // Exponentially smooth the current measurement with the value from the previous GC.
        let smooth = |prev: Option<f64>, cur, factor| {
            prev.map(|p| p * factor + cur * (1.0f64 - factor))
                .unwrap_or(cur)
        };
        let alloc_mem = smooth(
            stats.allocation_pages_prev,
            stats.allocation_pages,
            ALLOCATION_SMOOTH_FACTOR,
        );
        let alloc_time = smooth(
            stats.allocation_time_prev,
            stats.allocation_time,
            ALLOCATION_SMOOTH_FACTOR,
        );
        let gc_mem = smooth(
            stats.collection_pages_prev,
            stats.collection_pages,
            COLLECTION_SMOOTH_FACTOR,
        );
        let gc_time = smooth(
            stats.collection_time_prev,
            stats.collection_time,
            COLLECTION_SMOOTH_FACTOR,
        );
        trace!(
            "after smoothing, alloc mem = {}, alloc_time = {}",
            alloc_mem,
            alloc_time
        );
        trace!(
            "after smoothing, gc mem = {}, gc_time = {}",
            gc_mem,
            gc_time
        );

        // Save the current measurements for smoothing in the next GC, and reset the accumulators.
        stats.allocation_pages_prev = Some(stats.allocation_pages);
        stats.allocation_pages = 0f64;
        stats.allocation_time_prev = Some(stats.allocation_time);
        stats.allocation_time = 0f64;
        stats.collection_pages_prev = Some(stats.collection_pages);
        stats.collection_pages = 0f64;
        stats.collection_time_prev = Some(stats.collection_time);
        stats.collection_time = 0f64;

        // The extra headroom `e` balances the allocation rate against the collection rate. If any
        // of the smoothed measurements is zero, fall back to sqrt(live * 4096).
        let e: f64 = if alloc_mem != 0f64 && gc_mem != 0f64 && alloc_time != 0f64 && gc_time != 0f64
        {
            let mut e = live as f64;
            e *= alloc_mem / alloc_time;
            e /= TUNING_FACTOR;
            e /= gc_mem / gc_time;
            e.sqrt()
        } else {
            (live as f64 * 4096f64).sqrt()
        };

        let pending_pages = self.pending_pages.load(Ordering::SeqCst);

        let optimal_heap = live + e as usize + extra_reserve + pending_pages;
        trace!(
            "optimal = live {} + e {} + extra {} + pending {}",
            live,
            e,
            extra_reserve,
            pending_pages
        );

        let new_heap = optimal_heap.clamp(self.min_heap_pages, self.max_heap_pages);
        debug!(
            "MemBalancer: new heap limit = {} pages (optimal = {}, clamped to [{}, {}])",
            new_heap, optimal_heap, self.min_heap_pages, self.max_heap_pages
        );
        self.current_heap_pages.store(new_heap, Ordering::Relaxed);
    }
}