mmtk/util/heap/
gc_trigger.rs

1use atomic::Ordering;
2
3use crate::global_state::GlobalState;
4use crate::plan::Plan;
5use crate::policy::space::Space;
6use crate::scheduler::GCWorkScheduler;
7use crate::util::constants::BYTES_IN_PAGE;
8use crate::util::conversions;
9use crate::util::options::{GCTriggerSelector, Options, DEFAULT_MAX_NURSERY, DEFAULT_MIN_NURSERY};
10use crate::vm::Collection;
11use crate::vm::VMBinding;
12use crate::MMTK;
13use std::mem::MaybeUninit;
14use std::sync::atomic::{AtomicBool, AtomicUsize};
15use std::sync::Arc;
16
/// GCTrigger is responsible for triggering GCs based on the given policy.
/// All the decisions about heap limit and GC triggering should be resolved here.
/// Depending on the actual policy, we may forward the calls either to the plan
/// or to the binding/runtime.
pub struct GCTrigger<VM: VMBinding> {
    /// The current plan. This is uninitialized when we create it, and later initialized
    /// once we have a fixed address for the plan.
    plan: MaybeUninit<&'static dyn Plan<VM = VM>>,
    /// The triggering policy.
    pub policy: Box<dyn GCTriggerPolicy<VM>>,
    /// Set by mutators to trigger GC.  It is atomic so that mutators can check if GC has already
    /// been requested efficiently in `poll` without acquiring any mutex.
    request_flag: AtomicBool,
    /// The work scheduler that is notified when a collection is requested.
    scheduler: Arc<GCWorkScheduler<VM>>,
    /// User-supplied options (GC trigger selector, nursery size, stress factor, ...).
    options: Arc<Options>,
    /// Global MMTk state shared across components.
    state: Arc<GlobalState>,
}
34
impl<VM: VMBinding> GCTrigger<VM> {
    /// Create a GC trigger with the policy selected by the `gc_trigger` option.
    /// The plan pointer is left uninitialized; `set_plan` must be called later.
    pub fn new(
        options: Arc<Options>,
        scheduler: Arc<GCWorkScheduler<VM>>,
        state: Arc<GlobalState>,
    ) -> Self {
        let policy: Box<dyn GCTriggerPolicy<VM>> = match *options.gc_trigger {
            GCTriggerSelector::FixedHeapSize(size) => Box::new(FixedHeapSizeTrigger {
                total_pages: conversions::bytes_to_pages_up(size),
            }),
            GCTriggerSelector::DynamicHeapSize(min, max) => {
                let min_pages = conversions::bytes_to_pages_up(min);
                let max_pages = conversions::bytes_to_pages_up(max);

                // NoGC never collects, so a dynamic heap size makes no sense for it:
                // fall back to a fixed heap at the maximum size.
                if *options.plan == crate::util::options::PlanSelector::NoGC {
                    warn!("Cannot use dynamic heap size with NoGC.  Using fixed heap size trigger instead.");
                    Box::new(FixedHeapSizeTrigger {
                        total_pages: max_pages,
                    })
                } else {
                    Box::new(MemBalancerTrigger::new(min_pages, max_pages))
                }
            }
            GCTriggerSelector::Delegated => {
                // The binding supplies its own trigger implementation.
                <VM::VMCollection as crate::vm::Collection<VM>>::create_gc_trigger()
            }
        };

        GCTrigger {
            plan: MaybeUninit::uninit(),
            policy,
            request_flag: AtomicBool::new(false),
            scheduler,
            options,
            state,
        }
    }
70
    /// Set the plan. This is called in `create_plan()` after we created a boxed plan.
    /// After this call, the private `plan()` getter may safely be used.
    pub fn set_plan(&mut self, plan: &'static dyn Plan<VM = VM>) {
        self.plan.write(plan);
    }
75
    /// Get the current plan.
    fn plan(&self) -> &dyn Plan<VM = VM> {
        // SAFETY: `set_plan` initializes `self.plan` during MMTk setup (see the field
        // docs); this getter relies on it having been called first. NOTE(review):
        // that ordering is enforced outside this file -- confirm at call sites.
        unsafe { self.plan.assume_init() }
    }
79
80    /// Request a GC.  Called by mutators when polling (during allocation) and when handling user
81    /// GC requests (e.g. `System.gc();` in Java).
82    fn request(&self) {
83        if self.request_flag.load(Ordering::Relaxed) {
84            return;
85        }
86
87        if !self.request_flag.swap(true, Ordering::Relaxed) {
88            // `GCWorkScheduler::request_schedule_collection` needs to hold a mutex to communicate
89            // with GC workers, which is expensive for functions like `poll`.  We use the atomic
90            // flag `request_flag` to elide the need to acquire the mutex in subsequent calls.
91            probe!(mmtk, gc_requested);
92            self.scheduler.request_schedule_collection();
93        }
94    }
95
    /// Clear the "GC requested" flag so that mutators can trigger the next GC.
    /// Called by a GC worker when all mutators have come to a stop.
    pub fn clear_request(&self) {
        // NOTE(review): Relaxed appears to rely on mutators being stopped at this
        // point (per the doc above), so no mutator can race this store -- confirm.
        self.request_flag.store(false, Ordering::Relaxed);
    }
101
102    /// This method is called periodically by the allocation subsystem
103    /// (by default, each time a page is consumed), and provides the
104    /// collector with an opportunity to collect.
105    ///
106    /// Arguments:
107    /// * `space_full`: Space request failed, must recover pages within 'space'.
108    /// * `space`: The space that triggered the poll. This could `None` if the poll is not triggered by a space.
109    pub fn poll(&self, space_full: bool, space: Option<&dyn Space<VM>>) -> bool {
110        if !VM::VMCollection::is_collection_enabled() {
111            return false;
112        }
113
114        let plan = self.plan();
115        if self
116            .policy
117            .is_gc_required(space_full, space.map(|s| SpaceStats::new(s)), plan)
118        {
119            info!(
120                "[POLL] {}{} ({}/{} pages)",
121                if let Some(space) = space {
122                    format!("{}: ", space.get_name())
123                } else {
124                    "".to_string()
125                },
126                "Triggering collection",
127                plan.get_reserved_pages(),
128                plan.get_total_pages(),
129            );
130            self.request();
131            return true;
132        }
133        false
134    }
135
136    /// This method is called when the user manually requests a collection, such as `System.gc()` in Java.
137    /// Returns true if a collection is actually requested.
138    ///
139    /// # Arguments
140    /// * `force`: If true, we force a collection regardless of the settings. If false, we only trigger a collection if the settings allow it.
141    /// * `exhaustive`: If true, we try to make the collection exhaustive (e.g. full heap collection). If false, the collection kind is determined internally.
142    pub fn handle_user_collection_request(&self, force: bool, exhaustive: bool) -> bool {
143        if !self.plan().constraints().collects_garbage {
144            warn!("User attempted a collection request, but the plan can not do GC. The request is ignored.");
145            return false;
146        }
147
148        if force || !*self.options.ignore_system_gc && VM::VMCollection::is_collection_enabled() {
149            info!("User triggering collection");
150            // TODO: this may not work reliably. If a GC has been triggered, this will not force it to be a full heap GC.
151            if exhaustive {
152                if let Some(gen) = self.plan().generational() {
153                    gen.force_full_heap_collection();
154                }
155            }
156
157            self.state
158                .user_triggered_collection
159                .store(true, Ordering::Relaxed);
160            self.request();
161            return true;
162        }
163
164        false
165    }
166
    /// MMTK has requested stop-the-world activity (e.g., stw within a concurrent gc).
    ///
    /// NOTE(review): this function currently ends in `unimplemented!()` and will panic
    /// if called; it records the internal-trigger state and requests a GC before that.
    // TODO: We should use this for concurrent GC. E.g. in concurrent Immix, when the initial mark is done, we
    // can use this function to immediately trigger the final mark pause. The current implementation uses
    // normal collection_required check, which may delay the final mark unnecessarily.
    #[allow(unused)]
    pub fn trigger_internal_collection_request(&self) {
        self.state
            .last_internal_triggered_collection
            .store(true, Ordering::Relaxed);
        self.state
            .internal_triggered_collection
            .store(true, Ordering::Relaxed);
        // TODO: The current `request()` is probably incorrect for internally triggered GC.
        // Consider removing functions related to "internal triggered collection".
        self.request();
        // TODO: Make sure this function works correctly for concurrent GC.
        unimplemented!()
    }
185
    /// Check if we should do a stress GC now. See [`Self::should_do_stress_gc_inner`].
    pub fn should_do_stress_gc(&self) -> bool {
        Self::should_do_stress_gc_inner(&self.state, &self.options)
    }
189
190    /// Check if we should do a stress GC now. If GC is initialized and the allocation bytes exceeds
191    /// the stress factor, we should do a stress GC.
192    pub(crate) fn should_do_stress_gc_inner(state: &GlobalState, options: &Options) -> bool {
193        state.is_initialized()
194            && (state.allocation_bytes.load(Ordering::SeqCst) > *options.stress_factor)
195    }
196
    /// Check if the heap is full, as decided by the triggering policy from the
    /// current plan's page reservation.
    pub fn is_heap_full(&self) -> bool {
        self.policy.is_heap_full(self.plan())
    }
201
202    /// Return upper bound of the nursery size (in number of bytes)
203    pub fn get_max_nursery_bytes(&self) -> usize {
204        use crate::util::options::NurserySize;
205        debug_assert!(self.plan().generational().is_some());
206        match *self.options.nursery {
207            NurserySize::Bounded { min: _, max } => max,
208            NurserySize::ProportionalBounded { min: _, max } => {
209                let heap_size_bytes =
210                    conversions::pages_to_bytes(self.policy.get_current_heap_size_in_pages());
211                let max_bytes = heap_size_bytes as f64 * max;
212                let max_bytes = conversions::raw_align_up(max_bytes as usize, BYTES_IN_PAGE);
213                if max_bytes > DEFAULT_MAX_NURSERY {
214                    warn!("Proportional nursery with max size {} ({}) is larger than DEFAULT_MAX_NURSERY ({}). Use DEFAULT_MAX_NURSERY instead.", max, max_bytes, DEFAULT_MAX_NURSERY);
215                    DEFAULT_MAX_NURSERY
216                } else {
217                    max_bytes
218                }
219            }
220            NurserySize::Fixed(sz) => sz,
221        }
222    }
223
224    /// Return lower bound of the nursery size (in number of bytes)
225    pub fn get_min_nursery_bytes(&self) -> usize {
226        use crate::util::options::NurserySize;
227        debug_assert!(self.plan().generational().is_some());
228        match *self.options.nursery {
229            NurserySize::Bounded { min, max: _ } => min,
230            NurserySize::ProportionalBounded { min, max: _ } => {
231                let min_bytes =
232                    conversions::pages_to_bytes(self.policy.get_current_heap_size_in_pages())
233                        as f64
234                        * min;
235                let min_bytes = conversions::raw_align_up(min_bytes as usize, BYTES_IN_PAGE);
236                if min_bytes < DEFAULT_MIN_NURSERY {
237                    warn!("Proportional nursery with min size {} ({}) is smaller than DEFAULT_MIN_NURSERY ({}). Use DEFAULT_MIN_NURSERY instead.", min, min_bytes, DEFAULT_MIN_NURSERY);
238                    DEFAULT_MIN_NURSERY
239                } else {
240                    min_bytes
241                }
242            }
243            NurserySize::Fixed(sz) => sz,
244        }
245    }
246
247    /// Return upper bound of the nursery size (in number of pages)
248    pub fn get_max_nursery_pages(&self) -> usize {
249        crate::util::conversions::bytes_to_pages_up(self.get_max_nursery_bytes())
250    }
251
252    /// Return lower bound of the nursery size (in number of pages)
253    pub fn get_min_nursery_pages(&self) -> usize {
254        crate::util::conversions::bytes_to_pages_up(self.get_min_nursery_bytes())
255    }
256
    /// A check for the obvious out-of-memory case: if the requested size is larger than
    /// the heap size, it is definitely an OOM. We would like to identify that, and
    /// allows the binding to deal with OOM. Without this check, we will attempt
    /// to allocate from the page resource. If the requested size is unrealistically large
    /// (such as `usize::MAX`), it breaks the assumptions of our implementation of
    /// page resource, vm map, etc. This check prevents that, and allows us to
    /// handle the OOM case.
    /// Each allocator that may request an arbitrary size should call this method before
    /// acquiring memory from the space. For example, bump pointer allocator and large object
    /// allocator need to call this method. On the other hand, allocators that only allocate
    /// memory in fixed size blocks do not need to call this method.
    /// An allocator should call this method before doing any computation on the size to
    /// avoid arithmetic overflow. If we have to do computation in the allocation fastpath and
    /// overflow happens there, there is nothing we can do about it.
    /// Return a boolean to indicate if we will be out of memory, determined by the check.
    pub fn will_oom_on_alloc(&self, size: usize) -> bool {
        let max_pages = self.policy.get_max_heap_size_in_pages();
        // Round *down* with a plain shift rather than rounding up: rounding up could
        // overflow for sizes near `usize::MAX`, which is exactly the pathological
        // input this check exists to catch.
        let requested_pages = size >> crate::util::constants::LOG_BYTES_IN_PAGE;
        requested_pages > max_pages
    }
}
278
/// Provides statistics about the space. This is exposed to bindings, as it is used
/// in both [`crate::plan::Plan`] and [`GCTriggerPolicy`].
// This type exists so we do not need to expose the `Space` trait to the bindings:
// the wrapped `&dyn Space` is deliberately `pub(crate)`.
pub struct SpaceStats<'a, VM: VMBinding>(pub(crate) &'a dyn Space<VM>);
283
impl<'a, VM: VMBinding> SpaceStats<'a, VM> {
    /// Create new SpaceStats wrapping `space`. Crate-private: bindings receive
    /// instances, but can never construct them.
    fn new(space: &'a dyn Space<VM>) -> Self {
        Self(space)
    }

    /// Get the number of reserved pages for the space.
    pub fn reserved_pages(&self) -> usize {
        self.0.reserved_pages()
    }

    // We may expose more methods to bindings if they need more information for implementing GC triggers.
    // But we should never expose `Space` itself.
}
298
/// This trait describes a GC trigger policy. A triggering policy has hooks to be informed about
/// GC start/end so it can collect some statistics about GC and allocation. The policy needs to
/// decide the (current) heap limit and decide whether a GC should be performed.
pub trait GCTriggerPolicy<VM: VMBinding>: Sync + Send {
    /// Inform the triggering policy that we have pending allocation.
    /// Any GC trigger policy with dynamic heap size should take this into account when calculating a new heap size.
    /// Failing to do so may result in unnecessary GCs, or result in an infinite loop if the new heap size
    /// can never accommodate the pending allocation.
    fn on_pending_allocation(&self, _pages: usize) {}
    /// Inform the triggering policy that a GC starts.
    fn on_gc_start(&self, _mmtk: &'static MMTK<VM>) {}
    /// Inform the triggering policy that a GC is about to start the release work. This is called
    /// in the global Release work packet. This means we assume a plan
    /// does not schedule any work that reclaims memory before the global `Release` work. The current plans
    /// satisfy this assumption: they schedule other release work in `plan.release()`.
    fn on_gc_release(&self, _mmtk: &'static MMTK<VM>) {}
    /// Inform the triggering policy that a GC ends.
    fn on_gc_end(&self, _mmtk: &'static MMTK<VM>) {}
    /// Is a GC required now? The GC trigger may implement its own heuristics to decide when
    /// a GC should be performed. However, we recommend the implementation to do its own checks
    /// first, and always call `plan.collection_required(space_full, space)` at the end as a fallback to see if the plan needs
    /// to do a GC.
    ///
    /// Arguments:
    /// * `space_full`: Is any space full?
    /// * `space`: The space that is full. The GC trigger may access some stats of the space.
    /// * `plan`: The reference to the plan in use.
    fn is_gc_required(
        &self,
        space_full: bool,
        space: Option<SpaceStats<VM>>,
        plan: &dyn Plan<VM = VM>,
    ) -> bool;
    /// Is the current heap full?
    fn is_heap_full(&self, plan: &dyn Plan<VM = VM>) -> bool;
    /// Return the current heap size (in pages).
    fn get_current_heap_size_in_pages(&self) -> usize;
    /// Return the upper bound of heap size (in pages).
    fn get_max_heap_size_in_pages(&self) -> usize;
    /// Can the heap size grow?
    fn can_heap_size_grow(&self) -> bool;
}
341
/// A simple GC trigger that uses a fixed heap size.
pub struct FixedHeapSizeTrigger {
    /// The fixed heap size in pages. Never changes after construction.
    total_pages: usize,
}
impl<VM: VMBinding> GCTriggerPolicy<VM> for FixedHeapSizeTrigger {
    fn is_gc_required(
        &self,
        space_full: bool,
        space: Option<SpaceStats<VM>>,
        plan: &dyn Plan<VM = VM>,
    ) -> bool {
        // This trigger adds no heuristics of its own: let the plan decide.
        plan.collection_required(space_full, space)
    }

    fn is_heap_full(&self, plan: &dyn Plan<VM = VM>) -> bool {
        // If reserved pages is larger than the total pages, the heap is full.
        plan.get_reserved_pages() > self.total_pages
    }

    // With a fixed heap, the current size and the maximum size coincide, and the
    // heap can never grow.

    fn get_current_heap_size_in_pages(&self) -> usize {
        self.total_pages
    }

    fn get_max_heap_size_in_pages(&self) -> usize {
        self.total_pages
    }

    fn can_heap_size_grow(&self) -> bool {
        false
    }
}
374
375use atomic_refcell::AtomicRefCell;
376use std::time::Instant;
377
/// An implementation of MemBalancer (Optimal heap limits for reducing browser memory use, <https://dl.acm.org/doi/10.1145/3563323>)
/// We use MemBalancer to decide a heap limit between the min heap and the max heap.
/// The current implementation is a simplified version of mem balancer and it does not take collection/allocation speed into account,
/// and uses a fixed constant instead.
// TODO: implement a complete mem balancer.
pub struct MemBalancerTrigger {
    /// The min heap size
    min_heap_pages: usize,
    /// The max heap size
    max_heap_pages: usize,
    /// The current heap size
    current_heap_pages: AtomicUsize,
    /// The number of pending allocation pages. The allocation requests for them have failed, and a GC is triggered.
    /// We will need to take them into consideration so that the new heap size can accommodate those allocations.
    pending_pages: AtomicUsize,
    /// Statistics, guarded by an `AtomicRefCell` (panics on overlapping borrows).
    stats: AtomicRefCell<MemBalancerStats>,
}
396
#[derive(Copy, Clone, Debug)]
struct MemBalancerStats {
    // Allocation/collection stats in the previous estimation. We keep this so we can use them to smooth the current value
    /// Previous allocated memory in pages.
    allocation_pages_prev: Option<f64>,
    /// Previous allocation duration in secs
    allocation_time_prev: Option<f64>,
    /// Previous collected memory in pages
    collection_pages_prev: Option<f64>,
    /// Previous collection duration in secs
    collection_time_prev: Option<f64>,

    // Allocation/collection stats in this estimation.
    /// Allocated memory in pages
    allocation_pages: f64,
    /// Allocation duration in secs
    allocation_time: f64,
    /// Collected memory in pages (memory traversed during collection)
    collection_pages: f64,
    /// Collection duration in secs
    collection_time: f64,

    /// The time when this GC starts
    gc_start_time: Instant,
    /// The time when this GC ends
    gc_end_time: Instant,

    /// The live pages before we release memory.
    gc_release_live_pages: usize,
    /// The live pages at the GC end
    gc_end_live_pages: usize,
}
429
430impl std::default::Default for MemBalancerStats {
431    fn default() -> Self {
432        let now = Instant::now();
433        Self {
434            allocation_pages_prev: None,
435            allocation_time_prev: None,
436            collection_pages_prev: None,
437            collection_time_prev: None,
438            allocation_pages: 0f64,
439            allocation_time: 0f64,
440            collection_pages: 0f64,
441            collection_time: 0f64,
442            gc_start_time: now,
443            gc_end_time: now,
444            gc_release_live_pages: 0,
445            gc_end_live_pages: 0,
446        }
447    }
448}
449
450use crate::plan::GenerationalPlan;
451
impl MemBalancerStats {
    // Collect mem stats for generational plans:
    // * We ignore nursery GCs.
    // * allocation = objects in mature space = promoted + pre-tenured = live pages in mature space before release - live pages at the end of last mature GC
    // * collection = live pages in mature space at the end of GC -  live pages in mature space before release

    /// GC-start hook for generational plans. All bookkeeping happens at release/end,
    /// so there is nothing to record here.
    fn generational_mem_stats_on_gc_start<VM: VMBinding>(
        &mut self,
        _plan: &dyn GenerationalPlan<VM = VM>,
    ) {
        // We don't need to do anything
    }
    /// Release-stage hook for generational plans. For mature GCs, snapshot the mature
    /// live pages and derive the pages promoted since the last mature GC, which mem
    /// balancer treats as the "allocated" amount. Nursery GCs are ignored.
    fn generational_mem_stats_on_gc_release<VM: VMBinding>(
        &mut self,
        plan: &dyn GenerationalPlan<VM = VM>,
    ) {
        if !plan.is_current_gc_nursery() {
            self.gc_release_live_pages = plan.get_mature_reserved_pages();

            // Calculate the promoted pages (including pre tenured objects).
            // `saturating_sub` guards against the mature reservation having shrunk.
            let promoted = self
                .gc_release_live_pages
                .saturating_sub(self.gc_end_live_pages);
            self.allocation_pages = promoted as f64;
            trace!(
                "promoted = mature live before release {} - mature live at prev gc end {} = {}",
                self.gc_release_live_pages,
                self.gc_end_live_pages,
                promoted
            );
            trace!(
                "allocated pages (accumulated to) = {}",
                self.allocation_pages
            );
        }
    }
488    /// Return true if we should compute a new heap limit. Only do so at the end of a mature GC
489    fn generational_mem_stats_on_gc_end<VM: VMBinding>(
490        &mut self,
491        plan: &dyn GenerationalPlan<VM = VM>,
492    ) -> bool {
493        if !plan.is_current_gc_nursery() {
494            self.gc_end_live_pages = plan.get_mature_reserved_pages();
495            // Use live pages as an estimate for pages traversed during GC
496            self.collection_pages = self.gc_end_live_pages as f64;
497            trace!(
498                "collected pages = mature live at gc end {} - mature live at gc release {} = {}",
499                self.gc_release_live_pages,
500                self.gc_end_live_pages,
501                self.collection_pages
502            );
503            true
504        } else {
505            false
506        }
507    }
508
    // Collect mem stats for non generational plans
    // * allocation = live pages at the start of GC - live pages at the end of last GC
    // * collection = live pages at the end of GC - live pages before release

    /// GC-start hook for non-generational plans: estimate the pages allocated since
    /// the last GC as (currently reserved) - (live at the end of the last GC).
    fn non_generational_mem_stats_on_gc_start<VM: VMBinding>(&mut self, mmtk: &'static MMTK<VM>) {
        // `saturating_sub` guards against the reservation having shrunk below the
        // previous GC's live pages.
        self.allocation_pages = mmtk
            .get_plan()
            .get_reserved_pages()
            .saturating_sub(self.gc_end_live_pages) as f64;
        trace!(
            "allocated pages = used {} - live in last gc {} = {}",
            mmtk.get_plan().get_reserved_pages(),
            self.gc_end_live_pages,
            self.allocation_pages
        );
    }
    /// Release-stage hook for non-generational plans: snapshot the reserved pages
    /// before any memory is released.
    fn non_generational_mem_stats_on_gc_release<VM: VMBinding>(&mut self, mmtk: &'static MMTK<VM>) {
        self.gc_release_live_pages = mmtk.get_plan().get_reserved_pages();
        trace!("live before release = {}", self.gc_release_live_pages);
    }
529    fn non_generational_mem_stats_on_gc_end<VM: VMBinding>(&mut self, mmtk: &'static MMTK<VM>) {
530        self.gc_end_live_pages = mmtk.get_plan().get_reserved_pages();
531        trace!("live pages = {}", self.gc_end_live_pages);
532        // Use live pages as an estimate for pages traversed during GC
533        self.collection_pages = self.gc_end_live_pages as f64;
534        trace!(
535            "collected pages = live at gc end {} - live at gc release {} = {}",
536            self.gc_release_live_pages,
537            self.gc_end_live_pages,
538            self.collection_pages
539        );
540    }
541}
542
impl<VM: VMBinding> GCTriggerPolicy<VM> for MemBalancerTrigger {
    fn is_gc_required(
        &self,
        space_full: bool,
        space: Option<SpaceStats<VM>>,
        plan: &dyn Plan<VM = VM>,
    ) -> bool {
        // Let the plan decide: mem balancer only adjusts the heap limit, it does not
        // add its own triggering heuristics.
        plan.collection_required(space_full, space)
    }
553
    /// Accumulate pages whose allocation failed, so the next heap-limit computation
    /// can accommodate them. The counter is cleared at the end of each GC.
    fn on_pending_allocation(&self, pages: usize) {
        self.pending_pages.fetch_add(pages, Ordering::SeqCst);
    }
557
    /// Record the GC start time and attribute the time since the previous GC ended
    /// to mutator (allocation) time, then dispatch to the plan-specific stats hook.
    fn on_gc_start(&self, mmtk: &'static MMTK<VM>) {
        trace!("=== on_gc_start ===");
        self.access_stats(|stats| {
            stats.gc_start_time = Instant::now();
            // Time between the previous GC end and this GC start is mutator time.
            stats.allocation_time += (stats.gc_start_time - stats.gc_end_time).as_secs_f64();
            trace!(
                "gc_start = {:?}, allocation_time = {}",
                stats.gc_start_time,
                stats.allocation_time
            );

            if let Some(plan) = mmtk.get_plan().generational() {
                stats.generational_mem_stats_on_gc_start(plan);
            } else {
                stats.non_generational_mem_stats_on_gc_start(mmtk);
            }
        });
    }
576
577    fn on_gc_release(&self, mmtk: &'static MMTK<VM>) {
578        trace!("=== on_gc_release ===");
579        self.access_stats(|stats| {
580            if let Some(plan) = mmtk.get_plan().generational() {
581                stats.generational_mem_stats_on_gc_release(plan);
582            } else {
583                stats.non_generational_mem_stats_on_gc_release(mmtk);
584            }
585        });
586    }
587
    /// Record the GC end time, accumulate collection time, update the plan-specific
    /// stats, and (for mature/non-generational GCs) recompute the heap limit.
    /// Finally, reset the pending-allocation counter.
    fn on_gc_end(&self, mmtk: &'static MMTK<VM>) {
        trace!("=== on_gc_end ===");
        self.access_stats(|stats| {
            stats.gc_end_time = Instant::now();
            // The whole pause counts as collection time.
            stats.collection_time += (stats.gc_end_time - stats.gc_start_time).as_secs_f64();
            trace!(
                "gc_end = {:?}, collection_time = {}",
                stats.gc_end_time,
                stats.collection_time
            );

            if let Some(plan) = mmtk.get_plan().generational() {
                // Generational plans only recompute the limit after a mature GC.
                if stats.generational_mem_stats_on_gc_end(plan) {
                    self.compute_new_heap_limit(
                        mmtk.get_plan().get_reserved_pages(),
                        // We reserve an extra of min nursery. This ensures that we will not trigger
                        // a full heap GC in the next GC (if available pages is smaller than min nursery, we will force a full heap GC)
                        mmtk.get_plan().get_collection_reserved_pages()
                            + mmtk.gc_trigger.get_min_nursery_pages(),
                        stats,
                    );
                }
            } else {
                stats.non_generational_mem_stats_on_gc_end(mmtk);
                self.compute_new_heap_limit(
                    mmtk.get_plan().get_reserved_pages(),
                    mmtk.get_plan().get_collection_reserved_pages(),
                    stats,
                );
            }
        });
        // Clear pending allocation pages at the end of GC, no matter we used it or not.
        self.pending_pages.store(0, Ordering::SeqCst);
    }
622
    fn is_heap_full(&self, plan: &dyn Plan<VM = VM>) -> bool {
        // If reserved pages is larger than the current heap size, the heap is full.
        // Note: this compares against the *dynamic* limit, not the max heap size.
        plan.get_reserved_pages() > self.current_heap_pages.load(Ordering::Relaxed)
    }

    fn get_current_heap_size_in_pages(&self) -> usize {
        // The dynamically computed limit, updated by `compute_new_heap_limit`.
        self.current_heap_pages.load(Ordering::Relaxed)
    }

    fn get_max_heap_size_in_pages(&self) -> usize {
        // The user-specified upper bound; fixed after construction.
        self.max_heap_pages
    }

    fn can_heap_size_grow(&self) -> bool {
        // There is headroom as long as the current limit is below the max.
        self.current_heap_pages.load(Ordering::Relaxed) < self.max_heap_pages
    }
}
impl MemBalancerTrigger {
    /// Create a mem-balancer trigger for a heap bounded by `min_heap_pages` and
    /// `max_heap_pages`.
    fn new(min_heap_pages: usize, max_heap_pages: usize) -> Self {
        Self {
            min_heap_pages,
            max_heap_pages,
            // Start from the min heap; `compute_new_heap_limit` grows it as needed.
            current_heap_pages: AtomicUsize::new(min_heap_pages),
            pending_pages: AtomicUsize::new(0),
            stats: AtomicRefCell::new(MemBalancerStats::default()),
        }
    }
651
652    fn access_stats<F>(&self, mut f: F)
653    where
654        F: FnMut(&mut MemBalancerStats),
655    {
656        let mut stats = self.stats.borrow_mut();
657        f(&mut stats);
658    }
659
660    fn compute_new_heap_limit(
661        &self,
662        live: usize,
663        extra_reserve: usize,
664        stats: &mut MemBalancerStats,
665    ) {
666        trace!("compute new heap limit: {:?}", stats);
667
668        // Constants from the original paper
669        const ALLOCATION_SMOOTH_FACTOR: f64 = 0.95;
670        const COLLECTION_SMOOTH_FACTOR: f64 = 0.5;
671        const TUNING_FACTOR: f64 = 0.2;
672
673        // Smooth memory/time for allocation/collection
674        let smooth = |prev: Option<f64>, cur, factor| {
675            prev.map(|p| p * factor + cur * (1.0f64 - factor))
676                .unwrap_or(cur)
677        };
678        let alloc_mem = smooth(
679            stats.allocation_pages_prev,
680            stats.allocation_pages,
681            ALLOCATION_SMOOTH_FACTOR,
682        );
683        let alloc_time = smooth(
684            stats.allocation_time_prev,
685            stats.allocation_time,
686            ALLOCATION_SMOOTH_FACTOR,
687        );
688        let gc_mem = smooth(
689            stats.collection_pages_prev,
690            stats.collection_pages,
691            COLLECTION_SMOOTH_FACTOR,
692        );
693        let gc_time = smooth(
694            stats.collection_time_prev,
695            stats.collection_time,
696            COLLECTION_SMOOTH_FACTOR,
697        );
698        trace!(
699            "after smoothing, alloc mem = {}, alloc_time = {}",
700            alloc_mem,
701            alloc_time
702        );
703        trace!(
704            "after smoothing, gc mem    = {}, gc_time    = {}",
705            gc_mem,
706            gc_time
707        );
708
709        // We got the smoothed stats. Now save the current stats as previous stats
710        stats.allocation_pages_prev = Some(stats.allocation_pages);
711        stats.allocation_pages = 0f64;
712        stats.allocation_time_prev = Some(stats.allocation_time);
713        stats.allocation_time = 0f64;
714        stats.collection_pages_prev = Some(stats.collection_pages);
715        stats.collection_pages = 0f64;
716        stats.collection_time_prev = Some(stats.collection_time);
717        stats.collection_time = 0f64;
718
719        // Calculate the square root
720        let e: f64 = if alloc_mem != 0f64 && gc_mem != 0f64 && alloc_time != 0f64 && gc_time != 0f64
721        {
722            let mut e = live as f64;
723            e *= alloc_mem / alloc_time;
724            e /= TUNING_FACTOR;
725            e /= gc_mem / gc_time;
726            e.sqrt()
727        } else {
728            // If any collected stat is abnormal, we use the fallback heuristics.
729            (live as f64 * 4096f64).sqrt()
730        };
731
732        // Get pending allocations
733        let pending_pages = self.pending_pages.load(Ordering::SeqCst);
734
735        // This is the optimal heap limit due to mem balancer. We will need to clamp the value to the defined min/max range.
736        let optimal_heap = live + e as usize + extra_reserve + pending_pages;
737        trace!(
738            "optimal = live {} + sqrt(live) {} + extra {}",
739            live,
740            e,
741            extra_reserve
742        );
743
744        // The new heap size must be within min/max.
745        let new_heap = optimal_heap.clamp(self.min_heap_pages, self.max_heap_pages);
746        debug!(
747            "MemBalander: new heap limit = {} pages (optimal = {}, clamped to [{}, {}])",
748            new_heap, optimal_heap, self.min_heap_pages, self.max_heap_pages
749        );
750        self.current_heap_pages.store(new_heap, Ordering::Relaxed);
751    }
752}