use atomic::Ordering;

use crate::global_state::GlobalState;
use crate::plan::Plan;
use crate::policy::space::Space;
use crate::scheduler::GCWorkScheduler;
use crate::util::constants::BYTES_IN_PAGE;
use crate::util::conversions;
use crate::util::options::{GCTriggerSelector, Options, DEFAULT_MAX_NURSERY, DEFAULT_MIN_NURSERY};
use crate::vm::Collection;
use crate::vm::VMBinding;
use crate::MMTK;
use std::mem::MaybeUninit;
use std::sync::atomic::{AtomicBool, AtomicUsize};
use std::sync::Arc;

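/// `GCTrigger` is the central place where MMTk decides whether a GC should be triggered. It
/// owns a [`GCTriggerPolicy`] selected from the `gc_trigger` option (a fixed heap size, a
/// dynamic heap size driven by [`MemBalancerTrigger`], or a policy delegated to the VM
/// binding), and requests a collection from the scheduler when the policy or the user asks
/// for one.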
pub struct GCTrigger<VM: VMBinding> {
    /// The current plan. Set via `set_plan()` after the plan is created.
    plan: MaybeUninit<&'static dyn Plan<VM = VM>>,
    /// The policy that decides when to trigger a GC and how large the heap may grow.
    pub policy: Box<dyn GCTriggerPolicy<VM>>,
    /// Whether a GC request is currently pending.
    request_flag: AtomicBool,
    /// The GC work scheduler, used to schedule a collection when a GC is requested.
    scheduler: Arc<GCWorkScheduler<VM>>,
    options: Arc<Options>,
    state: Arc<GlobalState>,
}

impl<VM: VMBinding> GCTrigger<VM> {
    pub fn new(
        options: Arc<Options>,
        scheduler: Arc<GCWorkScheduler<VM>>,
        state: Arc<GlobalState>,
    ) -> Self {
        GCTrigger {
            plan: MaybeUninit::uninit(),
            policy: match *options.gc_trigger {
                GCTriggerSelector::FixedHeapSize(size) => Box::new(FixedHeapSizeTrigger {
                    total_pages: conversions::bytes_to_pages_up(size),
                }),
                GCTriggerSelector::DynamicHeapSize(min, max) => 'dynamic_heap_size: {
                    let min_pages = conversions::bytes_to_pages_up(min);
                    let max_pages = conversions::bytes_to_pages_up(max);

                    if *options.plan == crate::util::options::PlanSelector::NoGC {
                        warn!("Cannot use dynamic heap size with NoGC. Using fixed heap size trigger instead.");
                        break 'dynamic_heap_size Box::new(FixedHeapSizeTrigger {
                            total_pages: max_pages,
                        });
                    }

                    Box::new(MemBalancerTrigger::new(min_pages, max_pages))
                }
                GCTriggerSelector::Delegated => {
                    <VM::VMCollection as crate::vm::Collection<VM>>::create_gc_trigger()
                }
            },
            options,
            request_flag: AtomicBool::new(false),
            scheduler,
            state,
        }
    }

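    /// Set the plan for this GC trigger. This must be called before any method that consults
    /// the plan (such as `poll()`).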
    pub fn set_plan(&mut self, plan: &'static dyn Plan<VM = VM>) {
        self.plan.write(plan);
    }

    fn plan(&self) -> &dyn Plan<VM = VM> {
        // SAFETY: `set_plan()` must have been called before the plan is accessed.
        unsafe { self.plan.assume_init() }
    }

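    /// Request a GC: set the request flag and ask the scheduler to schedule a collection.
    /// Duplicate requests are ignored until the flag is cleared with `clear_request()`.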
    fn request(&self) {
        // Fast path: a request is already pending.
        if self.request_flag.load(Ordering::Relaxed) {
            return;
        }

        // The swap ensures that only one caller actually schedules the collection.
        if !self.request_flag.swap(true, Ordering::Relaxed) {
            probe!(mmtk, gc_requested);
            self.scheduler.request_schedule_collection();
        }
    }

    /// Clear the GC request flag, allowing new GC requests to be made.
    pub fn clear_request(&self) {
        self.request_flag.store(false, Ordering::Relaxed);
    }

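    /// Ask the trigger policy whether a GC is required, and request one if so. `space_full`
    /// indicates that the space in question has run out of pages, and `space` identifies that
    /// space if known. Returns `true` if a GC has been triggered, and `false` otherwise
    /// (including when the binding has disabled collection).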
    pub fn poll(&self, space_full: bool, space: Option<&dyn Space<VM>>) -> bool {
        if !VM::VMCollection::is_collection_enabled() {
            return false;
        }

        let plan = self.plan();
        if self
            .policy
            .is_gc_required(space_full, space.map(SpaceStats::new), plan)
        {
            info!(
                "[POLL] {}{} ({}/{} pages)",
                if let Some(space) = space {
                    format!("{}: ", space.get_name())
                } else {
                    "".to_string()
                },
                "Triggering collection",
                plan.get_reserved_pages(),
                plan.get_total_pages(),
            );
            self.request();
            return true;
        }
        false
    }

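    /// Handle a collection request from the user (e.g. a `System.gc()`-style call forwarded by
    /// the VM). If `force` is true, the request bypasses the `ignore_system_gc` option; if
    /// `exhaustive` is true and the plan is generational, a full-heap collection is forced.
    /// Returns `true` if a GC was triggered by this request.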
    pub fn handle_user_collection_request(&self, force: bool, exhaustive: bool) -> bool {
        if !self.plan().constraints().collects_garbage {
            warn!("User attempted a collection request, but the plan cannot do GC. The request is ignored.");
            return false;
        }

        if force || (!*self.options.ignore_system_gc && VM::VMCollection::is_collection_enabled()) {
            info!("User triggering collection");
            if exhaustive {
                if let Some(gen) = self.plan().generational() {
                    gen.force_full_heap_collection();
                }
            }

            self.state
                .user_triggered_collection
                .store(true, Ordering::Relaxed);
            self.request();
            return true;
        }

        false
    }

    #[allow(unused)]
    pub fn trigger_internal_collection_request(&self) {
        self.state
            .last_internal_triggered_collection
            .store(true, Ordering::Relaxed);
        self.state
            .internal_triggered_collection
            .store(true, Ordering::Relaxed);
        self.request();
        unimplemented!()
    }

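    /// Check whether a stress GC should be performed now.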
    pub fn should_do_stress_gc(&self) -> bool {
        Self::should_do_stress_gc_inner(&self.state, &self.options)
    }

    /// A stress GC is due when MMTk is initialized and the recorded allocation bytes exceed
    /// the `stress_factor` option.
    pub(crate) fn should_do_stress_gc_inner(state: &GlobalState, options: &Options) -> bool {
        state.is_initialized()
            && (state.allocation_bytes.load(Ordering::SeqCst) > *options.stress_factor)
    }

    /// Check with the trigger policy whether the heap is full.
    pub fn is_heap_full(&self) -> bool {
        self.policy.is_heap_full(self.plan())
    }

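    /// Return the maximum nursery size in bytes, derived from the `nursery` option. This is
    /// only meaningful for generational plans.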
    pub fn get_max_nursery_bytes(&self) -> usize {
        use crate::util::options::NurserySize;
        debug_assert!(self.plan().generational().is_some());
        match *self.options.nursery {
            NurserySize::Bounded { min: _, max } => max,
            NurserySize::ProportionalBounded { min: _, max } => {
                let heap_size_bytes =
                    conversions::pages_to_bytes(self.policy.get_current_heap_size_in_pages());
                let max_bytes = heap_size_bytes as f64 * max;
                let max_bytes = conversions::raw_align_up(max_bytes as usize, BYTES_IN_PAGE);
                if max_bytes > DEFAULT_MAX_NURSERY {
                    warn!("Proportional nursery with max size {} ({}) is larger than DEFAULT_MAX_NURSERY ({}). Using DEFAULT_MAX_NURSERY instead.", max, max_bytes, DEFAULT_MAX_NURSERY);
                    DEFAULT_MAX_NURSERY
                } else {
                    max_bytes
                }
            }
            NurserySize::Fixed(sz) => sz,
        }
    }

    /// Return the minimum nursery size in bytes, derived from the `nursery` option. This is
    /// only meaningful for generational plans.
    pub fn get_min_nursery_bytes(&self) -> usize {
        use crate::util::options::NurserySize;
        debug_assert!(self.plan().generational().is_some());
        match *self.options.nursery {
            NurserySize::Bounded { min, max: _ } => min,
            NurserySize::ProportionalBounded { min, max: _ } => {
                let min_bytes =
                    conversions::pages_to_bytes(self.policy.get_current_heap_size_in_pages())
                        as f64
                        * min;
                let min_bytes = conversions::raw_align_up(min_bytes as usize, BYTES_IN_PAGE);
                if min_bytes < DEFAULT_MIN_NURSERY {
                    warn!("Proportional nursery with min size {} ({}) is smaller than DEFAULT_MIN_NURSERY ({}). Using DEFAULT_MIN_NURSERY instead.", min, min_bytes, DEFAULT_MIN_NURSERY);
                    DEFAULT_MIN_NURSERY
                } else {
                    min_bytes
                }
            }
            NurserySize::Fixed(sz) => sz,
        }
    }

    /// Return the maximum nursery size in pages.
    pub fn get_max_nursery_pages(&self) -> usize {
        crate::util::conversions::bytes_to_pages_up(self.get_max_nursery_bytes())
    }

    /// Return the minimum nursery size in pages.
    pub fn get_min_nursery_pages(&self) -> usize {
        crate::util::conversions::bytes_to_pages_up(self.get_min_nursery_bytes())
    }

    /// Check if an allocation of `size` bytes can never succeed because it alone exceeds the
    /// maximum heap size.
    pub fn will_oom_on_alloc(&self, size: usize) -> bool {
        let max_pages = self.policy.get_max_heap_size_in_pages();
        let requested_pages = size >> crate::util::constants::LOG_BYTES_IN_PAGE;
        requested_pages > max_pages
    }
}

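/// A thin wrapper that exposes space statistics (currently only the reserved pages) to GC
/// trigger policies without exposing the full `Space` API outside the crate.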
pub struct SpaceStats<'a, VM: VMBinding>(pub(crate) &'a dyn Space<VM>);

impl<'a, VM: VMBinding> SpaceStats<'a, VM> {
    fn new(space: &'a dyn Space<VM>) -> Self {
        Self(space)
    }

    /// The number of pages reserved by the space.
    pub fn reserved_pages(&self) -> usize {
        self.0.reserved_pages()
    }
}

/// The trait for GC trigger policies. A policy decides when a GC should be triggered and how
/// large the heap is allowed to grow.
pub trait GCTriggerPolicy<VM: VMBinding>: Sync + Send {
    /// Inform the policy that an allocation of the given number of pages is pending.
    fn on_pending_allocation(&self, _pages: usize) {}
    /// Called when a GC starts.
    fn on_gc_start(&self, _mmtk: &'static MMTK<VM>) {}
    /// Called during the release phase of a GC, before memory is reclaimed.
    fn on_gc_release(&self, _mmtk: &'static MMTK<VM>) {}
    /// Called when a GC ends.
    fn on_gc_end(&self, _mmtk: &'static MMTK<VM>) {}
    /// Decide whether a GC is required now, given whether a space is full and, if known, the
    /// statistics of that space.
    fn is_gc_required(
        &self,
        space_full: bool,
        space: Option<SpaceStats<VM>>,
        plan: &dyn Plan<VM = VM>,
    ) -> bool;
    /// Check whether the heap is full under this policy.
    fn is_heap_full(&self, plan: &dyn Plan<VM = VM>) -> bool;
    /// Return the current heap size (limit) in pages.
    fn get_current_heap_size_in_pages(&self) -> usize;
    /// Return the maximum heap size in pages.
    fn get_max_heap_size_in_pages(&self) -> usize;
    /// Return whether this policy allows the heap size to grow.
    fn can_heap_size_grow(&self) -> bool;
}

/// A GC trigger policy with a fixed heap size. The heap is considered full once the reserved
/// pages exceed the fixed total.
pub struct FixedHeapSizeTrigger {
    total_pages: usize,
}
impl<VM: VMBinding> GCTriggerPolicy<VM> for FixedHeapSizeTrigger {
    fn is_gc_required(
        &self,
        space_full: bool,
        space: Option<SpaceStats<VM>>,
        plan: &dyn Plan<VM = VM>,
    ) -> bool {
        // Let the plan decide.
        plan.collection_required(space_full, space)
    }

    fn is_heap_full(&self, plan: &dyn Plan<VM = VM>) -> bool {
        plan.get_reserved_pages() > self.total_pages
    }

    fn get_current_heap_size_in_pages(&self) -> usize {
        self.total_pages
    }

    fn get_max_heap_size_in_pages(&self) -> usize {
        self.total_pages
    }

    fn can_heap_size_grow(&self) -> bool {
        false
    }
}

use atomic_refcell::AtomicRefCell;
use std::time::Instant;

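/// A GC trigger policy that dynamically resizes the heap between a minimum and a maximum size.
/// After each (full-heap) GC it recomputes the heap limit as the live size plus a headroom
/// derived from the observed allocation and collection rates (see `compute_new_heap_limit`),
/// in the spirit of the "mem balancer" approach the type is named after.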
pub struct MemBalancerTrigger {
    /// The lower bound of the heap size, in pages.
    min_heap_pages: usize,
    /// The upper bound of the heap size, in pages.
    max_heap_pages: usize,
    /// The current heap size (limit), in pages.
    current_heap_pages: AtomicUsize,
    /// Pages pending allocation when a GC is triggered; accounted for in the new heap limit.
    pending_pages: AtomicUsize,
    /// Statistics used to compute the new heap limit.
    stats: AtomicRefCell<MemBalancerStats>,
}

#[derive(Copy, Clone, Debug)]
struct MemBalancerStats {
    // Values from the previous epoch, used for exponential smoothing. `None` until the first
    // full measurement is available.
    allocation_pages_prev: Option<f64>,
    allocation_time_prev: Option<f64>,
    collection_pages_prev: Option<f64>,
    collection_time_prev: Option<f64>,

    // Current-epoch measurements: pages allocated (or promoted) and mutator time since the
    // last GC, and the pages/time attributed to the last collection.
    allocation_pages: f64,
    allocation_time: f64,
    collection_pages: f64,
    collection_time: f64,

    // When the last GC started and ended.
    gc_start_time: Instant,
    gc_end_time: Instant,

    // Live pages recorded at the release phase and at the end of the last GC.
    gc_release_live_pages: usize,
    gc_end_live_pages: usize,
}

impl std::default::Default for MemBalancerStats {
    fn default() -> Self {
        let now = Instant::now();
        Self {
            allocation_pages_prev: None,
            allocation_time_prev: None,
            collection_pages_prev: None,
            collection_time_prev: None,
            allocation_pages: 0f64,
            allocation_time: 0f64,
            collection_pages: 0f64,
            collection_time: 0f64,
            gc_start_time: now,
            gc_end_time: now,
            gc_release_live_pages: 0,
            gc_end_live_pages: 0,
        }
    }
}

use crate::plan::GenerationalPlan;

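// How the mem balancer stats are gathered over a GC cycle:
// * on GC start:   mutator time since the last GC is accumulated; for non-generational plans,
//                  the pages allocated since the last GC are recorded.
// * on GC release: the live pages before memory is reclaimed are recorded; for generational
//                  plans, pages promoted to the mature space are counted as allocation.
// * on GC end:     the live pages after the GC and the GC time are recorded.
// For generational plans, only full-heap GCs update these stats and feed into the heap-limit
// computation; nursery GCs are skipped.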
impl MemBalancerStats {
    fn generational_mem_stats_on_gc_start<VM: VMBinding>(
        &mut self,
        _plan: &dyn GenerationalPlan<VM = VM>,
    ) {
        // Nothing to do at GC start for generational plans: "allocation" into the mature space
        // is promotion, which is measured in `generational_mem_stats_on_gc_release`.
    }

    fn generational_mem_stats_on_gc_release<VM: VMBinding>(
        &mut self,
        plan: &dyn GenerationalPlan<VM = VM>,
    ) {
        if !plan.is_current_gc_nursery() {
            self.gc_release_live_pages = plan.get_mature_reserved_pages();

            // Pages promoted into the mature space since the last full-heap GC.
            let promoted = self
                .gc_release_live_pages
                .saturating_sub(self.gc_end_live_pages);
            self.allocation_pages = promoted as f64;
            trace!(
                "promoted = mature live before release {} - mature live at prev gc end {} = {}",
                self.gc_release_live_pages,
                self.gc_end_live_pages,
                promoted
            );
            trace!(
                "allocated pages (accumulated to) = {}",
                self.allocation_pages
            );
        }
    }

    fn generational_mem_stats_on_gc_end<VM: VMBinding>(
        &mut self,
        plan: &dyn GenerationalPlan<VM = VM>,
    ) -> bool {
        if !plan.is_current_gc_nursery() {
            self.gc_end_live_pages = plan.get_mature_reserved_pages();
            // Use the mature live pages at GC end as the estimate of collection work.
            self.collection_pages = self.gc_end_live_pages as f64;
            trace!(
                "mature live at gc release = {}, mature live at gc end = {}; collected pages (taken as mature live at gc end) = {}",
                self.gc_release_live_pages,
                self.gc_end_live_pages,
                self.collection_pages
            );
            true
        } else {
            false
        }
    }

    fn non_generational_mem_stats_on_gc_start<VM: VMBinding>(&mut self, mmtk: &'static MMTK<VM>) {
        // Pages allocated since the last GC = pages currently in use minus pages live at the
        // end of the last GC.
        self.allocation_pages = mmtk
            .get_plan()
            .get_reserved_pages()
            .saturating_sub(self.gc_end_live_pages) as f64;
        trace!(
            "allocated pages = used {} - live in last gc {} = {}",
            mmtk.get_plan().get_reserved_pages(),
            self.gc_end_live_pages,
            self.allocation_pages
        );
    }

    fn non_generational_mem_stats_on_gc_release<VM: VMBinding>(&mut self, mmtk: &'static MMTK<VM>) {
        self.gc_release_live_pages = mmtk.get_plan().get_reserved_pages();
        trace!("live before release = {}", self.gc_release_live_pages);
    }

    fn non_generational_mem_stats_on_gc_end<VM: VMBinding>(&mut self, mmtk: &'static MMTK<VM>) {
        self.gc_end_live_pages = mmtk.get_plan().get_reserved_pages();
        trace!("live pages = {}", self.gc_end_live_pages);
        // Use the live pages at GC end as the estimate of collection work.
        self.collection_pages = self.gc_end_live_pages as f64;
        trace!(
            "live at gc release = {}, live at gc end = {}; collected pages (taken as live at gc end) = {}",
            self.gc_release_live_pages,
            self.gc_end_live_pages,
            self.collection_pages
        );
    }
}

impl<VM: VMBinding> GCTriggerPolicy<VM> for MemBalancerTrigger {
    fn is_gc_required(
        &self,
        space_full: bool,
        space: Option<SpaceStats<VM>>,
        plan: &dyn Plan<VM = VM>,
    ) -> bool {
        // Let the plan decide.
        plan.collection_required(space_full, space)
    }

    fn on_pending_allocation(&self, pages: usize) {
        self.pending_pages.fetch_add(pages, Ordering::SeqCst);
    }

    fn on_gc_start(&self, mmtk: &'static MMTK<VM>) {
        trace!("=== on_gc_start ===");
        self.access_stats(|stats| {
            stats.gc_start_time = Instant::now();
            stats.allocation_time += (stats.gc_start_time - stats.gc_end_time).as_secs_f64();
            trace!(
                "gc_start = {:?}, allocation_time = {}",
                stats.gc_start_time,
                stats.allocation_time
            );

            if let Some(plan) = mmtk.get_plan().generational() {
                stats.generational_mem_stats_on_gc_start(plan);
            } else {
                stats.non_generational_mem_stats_on_gc_start(mmtk);
            }
        });
    }

    fn on_gc_release(&self, mmtk: &'static MMTK<VM>) {
        trace!("=== on_gc_release ===");
        self.access_stats(|stats| {
            if let Some(plan) = mmtk.get_plan().generational() {
                stats.generational_mem_stats_on_gc_release(plan);
            } else {
                stats.non_generational_mem_stats_on_gc_release(mmtk);
            }
        });
    }

    fn on_gc_end(&self, mmtk: &'static MMTK<VM>) {
        trace!("=== on_gc_end ===");
        self.access_stats(|stats| {
            stats.gc_end_time = Instant::now();
            stats.collection_time += (stats.gc_end_time - stats.gc_start_time).as_secs_f64();
            trace!(
                "gc_end = {:?}, collection_time = {}",
                stats.gc_end_time,
                stats.collection_time
            );

            if let Some(plan) = mmtk.get_plan().generational() {
                if stats.generational_mem_stats_on_gc_end(plan) {
                    self.compute_new_heap_limit(
                        mmtk.get_plan().get_reserved_pages(),
                        mmtk.get_plan().get_collection_reserved_pages()
                            + mmtk.gc_trigger.get_min_nursery_pages(),
                        stats,
                    );
                }
            } else {
                stats.non_generational_mem_stats_on_gc_end(mmtk);
                self.compute_new_heap_limit(
                    mmtk.get_plan().get_reserved_pages(),
                    mmtk.get_plan().get_collection_reserved_pages(),
                    stats,
                );
            }
        });
        // The pending pages have been accounted for in the new heap limit. Clear them.
        self.pending_pages.store(0, Ordering::SeqCst);
    }

    fn is_heap_full(&self, plan: &dyn Plan<VM = VM>) -> bool {
        plan.get_reserved_pages() > self.current_heap_pages.load(Ordering::Relaxed)
    }

    fn get_current_heap_size_in_pages(&self) -> usize {
        self.current_heap_pages.load(Ordering::Relaxed)
    }

    fn get_max_heap_size_in_pages(&self) -> usize {
        self.max_heap_pages
    }

    fn can_heap_size_grow(&self) -> bool {
        self.current_heap_pages.load(Ordering::Relaxed) < self.max_heap_pages
    }
}
impl MemBalancerTrigger {
    fn new(min_heap_pages: usize, max_heap_pages: usize) -> Self {
        Self {
            min_heap_pages,
            max_heap_pages,
            pending_pages: AtomicUsize::new(0),
            current_heap_pages: AtomicUsize::new(min_heap_pages),
            stats: AtomicRefCell::new(Default::default()),
        }
    }

    fn access_stats<F>(&self, mut f: F)
    where
        F: FnMut(&mut MemBalancerStats),
    {
        let mut stats = self.stats.borrow_mut();
        f(&mut stats);
    }

    fn compute_new_heap_limit(
        &self,
        live: usize,
        extra_reserve: usize,
        stats: &mut MemBalancerStats,
    ) {
        trace!("compute new heap limit: {:?}", stats);

        const ALLOCATION_SMOOTH_FACTOR: f64 = 0.95;
        const COLLECTION_SMOOTH_FACTOR: f64 = 0.5;
        const TUNING_FACTOR: f64 = 0.2;

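        // The headroom E added on top of the live size follows a square-root rule computed
        // from the smoothed rates below:
        //   E = sqrt( live * (alloc_mem / alloc_time) / TUNING_FACTOR / (gc_mem / gc_time) )
        // so headroom grows with the live size and the allocation rate, and shrinks as
        // collection becomes relatively more expensive. If any rate is unavailable (zero),
        // a fallback of sqrt(live * 4096) is used. The new heap limit is then
        //   live + E + extra_reserve + pending_pages,
        // clamped to [min_heap_pages, max_heap_pages].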
        // Exponentially smooth the current measurement with the previous epoch's value.
        let smooth = |prev: Option<f64>, cur, factor| {
            prev.map(|p| p * factor + cur * (1.0f64 - factor))
                .unwrap_or(cur)
        };
        let alloc_mem = smooth(
            stats.allocation_pages_prev,
            stats.allocation_pages,
            ALLOCATION_SMOOTH_FACTOR,
        );
        let alloc_time = smooth(
            stats.allocation_time_prev,
            stats.allocation_time,
            ALLOCATION_SMOOTH_FACTOR,
        );
        let gc_mem = smooth(
            stats.collection_pages_prev,
            stats.collection_pages,
            COLLECTION_SMOOTH_FACTOR,
        );
        let gc_time = smooth(
            stats.collection_time_prev,
            stats.collection_time,
            COLLECTION_SMOOTH_FACTOR,
        );
        trace!(
            "after smoothing, alloc mem = {}, alloc_time = {}",
            alloc_mem,
            alloc_time
        );
        trace!(
            "after smoothing, gc mem = {}, gc_time = {}",
            gc_mem,
            gc_time
        );

        // Save the current measurements for smoothing in the next epoch, and reset the
        // accumulators for the next mutator phase.
        stats.allocation_pages_prev = Some(stats.allocation_pages);
        stats.allocation_pages = 0f64;
        stats.allocation_time_prev = Some(stats.allocation_time);
        stats.allocation_time = 0f64;
        stats.collection_pages_prev = Some(stats.collection_pages);
        stats.collection_pages = 0f64;
        stats.collection_time_prev = Some(stats.collection_time);
        stats.collection_time = 0f64;

        let e: f64 = if alloc_mem != 0f64 && gc_mem != 0f64 && alloc_time != 0f64 && gc_time != 0f64
        {
            let mut e = live as f64;
            e *= alloc_mem / alloc_time;
            e /= TUNING_FACTOR;
            e /= gc_mem / gc_time;
            e.sqrt()
        } else {
            // Not enough information yet to compute the rates. Fall back to a simple heuristic.
            (live as f64 * 4096f64).sqrt()
        };

        let pending_pages = self.pending_pages.load(Ordering::SeqCst);

        let optimal_heap = live + e as usize + extra_reserve + pending_pages;
        trace!(
            "optimal = live {} + headroom {} + extra {} + pending {}",
            live,
            e,
            extra_reserve,
            pending_pages
        );

        let new_heap = optimal_heap.clamp(self.min_heap_pages, self.max_heap_pages);
        debug!(
            "MemBalancer: new heap limit = {} pages (optimal = {}, clamped to [{}, {}])",
            new_heap, optimal_heap, self.min_heap_pages, self.max_heap_pages
        );
        self.current_heap_pages.store(new_heap, Ordering::Relaxed);
    }
}
752}