mmtk/util/heap/
gc_trigger.rs

use atomic::Ordering;

use crate::global_state::GlobalState;
use crate::plan::Plan;
use crate::policy::space::Space;
use crate::scheduler::GCWorkScheduler;
use crate::util::constants::BYTES_IN_PAGE;
use crate::util::conversions;
use crate::util::options::{GCTriggerSelector, Options, DEFAULT_MAX_NURSERY, DEFAULT_MIN_NURSERY};
use crate::vm::Collection;
use crate::vm::VMBinding;
use crate::MMTK;
use std::mem::MaybeUninit;
use std::sync::atomic::{AtomicBool, AtomicUsize};
use std::sync::Arc;

/// GCTrigger is responsible for triggering GCs based on the given policy.
/// All the decisions about heap limit and GC triggering should be resolved here.
/// Depending on the actual policy, we may forward the calls either to the plan
/// or to the binding/runtime.
pub struct GCTrigger<VM: VMBinding> {
    /// The current plan. This is uninitialized when we create it, and later initialized
    /// once we have a fixed address for the plan.
    plan: MaybeUninit<&'static dyn Plan<VM = VM>>,
    /// The triggering policy.
    pub policy: Box<dyn GCTriggerPolicy<VM>>,
    /// Set by mutators to trigger GC.  It is atomic so that mutators can efficiently check in
    /// `poll` whether a GC has already been requested, without acquiring any mutex.
    request_flag: AtomicBool,
    scheduler: Arc<GCWorkScheduler<VM>>,
    options: Arc<Options>,
    state: Arc<GlobalState>,
}

impl<VM: VMBinding> GCTrigger<VM> {
    pub fn new(
        options: Arc<Options>,
        scheduler: Arc<GCWorkScheduler<VM>>,
        state: Arc<GlobalState>,
    ) -> Self {
        GCTrigger {
            plan: MaybeUninit::uninit(),
            policy: match *options.gc_trigger {
                GCTriggerSelector::FixedHeapSize(size) => Box::new(FixedHeapSizeTrigger {
                    total_pages: conversions::bytes_to_pages_up(size),
                }),
                GCTriggerSelector::DynamicHeapSize(min, max) => 'dynamic_heap_size: {
                    let min_pages = conversions::bytes_to_pages_up(min);
                    let max_pages = conversions::bytes_to_pages_up(max);

                    if *options.plan == crate::util::options::PlanSelector::NoGC {
                        warn!("Cannot use dynamic heap size with NoGC.  Using fixed heap size trigger instead.");
                        break 'dynamic_heap_size Box::new(FixedHeapSizeTrigger {
                            total_pages: max_pages,
                        });
                    }

                    Box::new(MemBalancerTrigger::new(min_pages, max_pages))
                }
                GCTriggerSelector::Delegated => {
                    <VM::VMCollection as crate::vm::Collection<VM>>::create_gc_trigger()
                }
            },
            options,
            request_flag: AtomicBool::new(false),
            scheduler,
            state,
        }
    }

    /// Set the plan. This is called in `create_plan()` after we have created a boxed plan.
    pub fn set_plan(&mut self, plan: &'static dyn Plan<VM = VM>) {
        self.plan.write(plan);
    }

    fn plan(&self) -> &dyn Plan<VM = VM> {
        unsafe { self.plan.assume_init() }
    }

    /// Request a GC.  Called by mutators when polling (during allocation) and when handling user
    /// GC requests (e.g. `System.gc();` in Java).
    fn request(&self) {
        if self.request_flag.load(Ordering::Relaxed) {
            return;
        }

        if !self.request_flag.swap(true, Ordering::Relaxed) {
            // `GCWorkScheduler::request_schedule_collection` needs to hold a mutex to communicate
            // with GC workers, which is expensive for functions like `poll`.  We use the atomic
            // flag `request_flag` to elide the need to acquire the mutex in subsequent calls.
            probe!(mmtk, gc_requested);
            self.scheduler.request_schedule_collection();
        }
    }

    /// Clear the "GC requested" flag so that mutators can trigger the next GC.
    /// Called by a GC worker when all mutators have come to a stop.
    pub fn clear_request(&self) {
        self.request_flag.store(false, Ordering::Relaxed);
    }

    /// This method is called periodically by the allocation subsystem
    /// (by default, each time a page is consumed), and provides the
    /// collector with an opportunity to collect.
    ///
    /// Arguments:
    /// * `space_full`: Space request failed, must recover pages within `space`.
    /// * `space`: The space that triggered the poll. This can be `None` if the poll is not triggered by a space.
    pub fn poll(&self, space_full: bool, space: Option<&dyn Space<VM>>) -> bool {
        if !VM::VMCollection::is_collection_enabled() {
            return false;
        }

        let plan = self.plan();
        if self
            .policy
            .is_gc_required(space_full, space.map(|s| SpaceStats::new(s)), plan)
        {
            info!(
                "[POLL] {}{} ({}/{} pages)",
                if let Some(space) = space {
                    format!("{}: ", space.get_name())
                } else {
                    "".to_string()
                },
                "Triggering collection",
                plan.get_reserved_pages(),
                plan.get_total_pages(),
            );
            self.request();
            return true;
        }
        false
    }

    /// This method is called when the user manually requests a collection, such as `System.gc()` in Java.
    /// Returns true if a collection is actually requested.
    ///
    /// # Arguments
    /// * `force`: If true, we force a collection regardless of the settings. If false, we only trigger a collection if the settings allow it.
    /// * `exhaustive`: If true, we try to make the collection exhaustive (e.g. a full heap collection). If false, the collection kind is determined internally.
    pub fn handle_user_collection_request(&self, force: bool, exhaustive: bool) -> bool {
        if !self.plan().constraints().collects_garbage {
            warn!("User attempted a collection request, but the plan cannot do GC. The request is ignored.");
            return false;
        }

        if force || !*self.options.ignore_system_gc && VM::VMCollection::is_collection_enabled() {
            info!("User triggering collection");
            // TODO: this may not work reliably. If a GC has been triggered, this will not force it to be a full heap GC.
            if exhaustive {
                if let Some(gen) = self.plan().generational() {
                    gen.force_full_heap_collection();
                }
            }

            self.state
                .user_triggered_collection
                .store(true, Ordering::Relaxed);
            self.request();
            return true;
        }

        false
    }

    /// MMTk has requested stop-the-world activity (e.g. a stop-the-world pause within a concurrent GC).
    // TODO: We should use this for concurrent GC. E.g. in concurrent Immix, when the initial mark is done, we
    // can use this function to immediately trigger the final mark pause. The current implementation uses
    // the normal collection_required check, which may delay the final mark unnecessarily.
    #[allow(unused)]
    pub fn trigger_internal_collection_request(&self) {
        self.state
            .last_internal_triggered_collection
            .store(true, Ordering::Relaxed);
        self.state
            .internal_triggered_collection
            .store(true, Ordering::Relaxed);
        // TODO: The current `request()` is probably incorrect for internally triggered GC.
        // Consider removing functions related to "internal triggered collection".
        self.request();
        // TODO: Make sure this function works correctly for concurrent GC.
        unimplemented!()
    }

    pub fn should_do_stress_gc(&self) -> bool {
        Self::should_do_stress_gc_inner(&self.state, &self.options)
    }

    /// Check if we should do a stress GC now. If GC is initialized and the number of allocated
    /// bytes exceeds the stress factor, we should do a stress GC.
    pub(crate) fn should_do_stress_gc_inner(state: &GlobalState, options: &Options) -> bool {
        state.is_initialized()
            && (state.allocation_bytes.load(Ordering::SeqCst) > *options.stress_factor)
    }

    /// Check if the heap is full.
    pub fn is_heap_full(&self) -> bool {
        self.policy.is_heap_full(self.plan())
    }

    /// Return the upper bound of the nursery size (in number of bytes).
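    ///
    /// For example (an illustrative calculation, not taken from the code): with
    /// `NurserySize::ProportionalBounded { max: 0.25, .. }` and a current heap size of 1 GiB,
    /// this returns 256 MiB rounded up to a page boundary, unless that value exceeds
    /// `DEFAULT_MAX_NURSERY`, in which case `DEFAULT_MAX_NURSERY` is returned instead.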
    pub fn get_max_nursery_bytes(&self) -> usize {
        use crate::util::options::NurserySize;
        debug_assert!(self.plan().generational().is_some());
        match *self.options.nursery {
            NurserySize::Bounded { min: _, max } => max,
            NurserySize::ProportionalBounded { min: _, max } => {
                let heap_size_bytes =
                    conversions::pages_to_bytes(self.policy.get_current_heap_size_in_pages());
                let max_bytes = heap_size_bytes as f64 * max;
                let max_bytes = conversions::raw_align_up(max_bytes as usize, BYTES_IN_PAGE);
                if max_bytes > DEFAULT_MAX_NURSERY {
                    warn!("Proportional nursery with max size {} ({}) is larger than DEFAULT_MAX_NURSERY ({}). Using DEFAULT_MAX_NURSERY instead.", max, max_bytes, DEFAULT_MAX_NURSERY);
                    DEFAULT_MAX_NURSERY
                } else {
                    max_bytes
                }
            }
            NurserySize::Fixed(sz) => sz,
        }
    }

    /// Return the lower bound of the nursery size (in number of bytes).
    pub fn get_min_nursery_bytes(&self) -> usize {
        use crate::util::options::NurserySize;
        debug_assert!(self.plan().generational().is_some());
        match *self.options.nursery {
            NurserySize::Bounded { min, max: _ } => min,
            NurserySize::ProportionalBounded { min, max: _ } => {
                let min_bytes =
                    conversions::pages_to_bytes(self.policy.get_current_heap_size_in_pages())
                        as f64
                        * min;
                let min_bytes = conversions::raw_align_up(min_bytes as usize, BYTES_IN_PAGE);
                if min_bytes < DEFAULT_MIN_NURSERY {
                    warn!("Proportional nursery with min size {} ({}) is smaller than DEFAULT_MIN_NURSERY ({}). Using DEFAULT_MIN_NURSERY instead.", min, min_bytes, DEFAULT_MIN_NURSERY);
                    DEFAULT_MIN_NURSERY
                } else {
                    min_bytes
                }
            }
            NurserySize::Fixed(sz) => sz,
        }
    }

    /// Return the upper bound of the nursery size (in number of pages).
    pub fn get_max_nursery_pages(&self) -> usize {
        crate::util::conversions::bytes_to_pages_up(self.get_max_nursery_bytes())
    }

    /// Return the lower bound of the nursery size (in number of pages).
    pub fn get_min_nursery_pages(&self) -> usize {
        crate::util::conversions::bytes_to_pages_up(self.get_min_nursery_bytes())
    }
}

/// Provides statistics about the space. This is exposed to bindings, as it is used
/// in both [`crate::plan::Plan`] and [`GCTriggerPolicy`].
// This type exists so we do not need to expose the `Space` trait to the bindings.
pub struct SpaceStats<'a, VM: VMBinding>(pub(crate) &'a dyn Space<VM>);

impl<'a, VM: VMBinding> SpaceStats<'a, VM> {
    /// Create new SpaceStats.
    fn new(space: &'a dyn Space<VM>) -> Self {
        Self(space)
    }

    /// Get the number of reserved pages for the space.
    pub fn reserved_pages(&self) -> usize {
        self.0.reserved_pages()
    }

    // We may expose more methods to bindings if they need more information for implementing GC triggers.
    // But we should never expose `Space` itself.
}

/// This trait describes a GC trigger policy. A triggering policy has hooks to be informed about
/// GC start/end so it can collect statistics about GC and allocation. The policy needs to
/// decide the (current) heap limit and decide whether a GC should be performed.
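///
/// The sketch below is an illustrative, hypothetical policy (`FixedRatioTrigger` is not part of
/// MMTk) showing what an implementation of this trait can look like. It is not a recommended or
/// tested policy; real policies are selected via the `gc_trigger` option.
///
/// ```ignore
/// struct FixedRatioTrigger {
///     total_pages: usize,
///     ratio: f64,
/// }
///
/// impl<VM: VMBinding> GCTriggerPolicy<VM> for FixedRatioTrigger {
///     fn is_gc_required(
///         &self,
///         space_full: bool,
///         space: Option<SpaceStats<VM>>,
///         plan: &dyn Plan<VM = VM>,
///     ) -> bool {
///         // Trigger once reserved pages exceed a fixed fraction of the heap,
///         // otherwise fall back to the plan's own check.
///         plan.get_reserved_pages() as f64 > self.total_pages as f64 * self.ratio
///             || plan.collection_required(space_full, space)
///     }
///     fn is_heap_full(&self, plan: &dyn Plan<VM = VM>) -> bool {
///         plan.get_reserved_pages() > self.total_pages
///     }
///     fn get_current_heap_size_in_pages(&self) -> usize {
///         self.total_pages
///     }
///     fn get_max_heap_size_in_pages(&self) -> usize {
///         self.total_pages
///     }
///     fn can_heap_size_grow(&self) -> bool {
///         false
///     }
/// }
/// ```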
pub trait GCTriggerPolicy<VM: VMBinding>: Sync + Send {
    /// Inform the triggering policy that we have pending allocation.
    /// Any GC trigger policy with a dynamic heap size should take this into account when calculating a new heap size.
    /// Failing to do so may result in unnecessary GCs, or in an infinite loop if the new heap size
    /// can never accommodate the pending allocation.
    fn on_pending_allocation(&self, _pages: usize) {}
    /// Inform the triggering policy that a GC starts.
    fn on_gc_start(&self, _mmtk: &'static MMTK<VM>) {}
    /// Inform the triggering policy that a GC is about to start the release work. This is called
    /// in the global `Release` work packet. This means we assume a plan does not schedule any
    /// work that reclaims memory before the global `Release` work. The current plans satisfy
    /// this assumption: they schedule other release work in `plan.release()`.
    fn on_gc_release(&self, _mmtk: &'static MMTK<VM>) {}
    /// Inform the triggering policy that a GC ends.
    fn on_gc_end(&self, _mmtk: &'static MMTK<VM>) {}
    /// Is a GC required now? The GC trigger may implement its own heuristics to decide when
    /// a GC should be performed. However, we recommend that the implementation do its own checks
    /// first, and always call `plan.collection_required(space_full, space)` at the end as a
    /// fallback to see if the plan needs to do a GC.
    ///
    /// Arguments:
    /// * `space_full`: Is any space full?
    /// * `space`: The space that is full. The GC trigger may access some stats of the space.
    /// * `plan`: The reference to the plan in use.
    fn is_gc_required(
        &self,
        space_full: bool,
        space: Option<SpaceStats<VM>>,
        plan: &dyn Plan<VM = VM>,
    ) -> bool;
    /// Is the current heap full?
    fn is_heap_full(&self, plan: &dyn Plan<VM = VM>) -> bool;
    /// Return the current heap size (in pages).
    fn get_current_heap_size_in_pages(&self) -> usize;
    /// Return the upper bound of the heap size.
    fn get_max_heap_size_in_pages(&self) -> usize;
    /// Can the heap size grow?
    fn can_heap_size_grow(&self) -> bool;
}

/// A simple GC trigger that uses a fixed heap size.
pub struct FixedHeapSizeTrigger {
    total_pages: usize,
}
impl<VM: VMBinding> GCTriggerPolicy<VM> for FixedHeapSizeTrigger {
    fn is_gc_required(
        &self,
        space_full: bool,
        space: Option<SpaceStats<VM>>,
        plan: &dyn Plan<VM = VM>,
    ) -> bool {
        // Let the plan decide
        plan.collection_required(space_full, space)
    }

    fn is_heap_full(&self, plan: &dyn Plan<VM = VM>) -> bool {
        // If the reserved pages exceed the total pages, the heap is full.
        plan.get_reserved_pages() > self.total_pages
    }

    fn get_current_heap_size_in_pages(&self) -> usize {
        self.total_pages
    }

    fn get_max_heap_size_in_pages(&self) -> usize {
        self.total_pages
    }

    fn can_heap_size_grow(&self) -> bool {
        false
    }
}

use atomic_refcell::AtomicRefCell;
use std::time::Instant;

/// An implementation of MemBalancer (Optimal heap limits for reducing browser memory use, <https://dl.acm.org/doi/10.1145/3563323>).
/// We use MemBalancer to decide a heap limit between the min heap and the max heap.
/// The current implementation is a simplified version of MemBalancer: it does not take
/// collection/allocation speed into account, and uses a fixed constant instead.
// TODO: implement a complete mem balancer.
pub struct MemBalancerTrigger {
    /// The min heap size
    min_heap_pages: usize,
    /// The max heap size
    max_heap_pages: usize,
    /// The current heap size
    current_heap_pages: AtomicUsize,
    /// The number of pending allocation pages. The allocation requests for them have failed, and a GC is triggered.
    /// We will need to take them into consideration so that the new heap size can accommodate those allocations.
    pending_pages: AtomicUsize,
    /// Statistics
    stats: AtomicRefCell<MemBalancerStats>,
}

#[derive(Copy, Clone, Debug)]
struct MemBalancerStats {
    // Allocation/collection stats from the previous estimation. We keep these so we can use them to smooth the current values.
    /// Previous allocated memory in pages.
    allocation_pages_prev: Option<f64>,
    /// Previous allocation duration in secs
    allocation_time_prev: Option<f64>,
    /// Previous collected memory in pages
    collection_pages_prev: Option<f64>,
    /// Previous collection duration in secs
    collection_time_prev: Option<f64>,

    // Allocation/collection stats in this estimation.
    /// Allocated memory in pages
    allocation_pages: f64,
    /// Allocation duration in secs
    allocation_time: f64,
    /// Collected memory in pages (memory traversed during collection)
    collection_pages: f64,
    /// Collection duration in secs
    collection_time: f64,

    /// The time when this GC starts
    gc_start_time: Instant,
    /// The time when this GC ends
    gc_end_time: Instant,

    /// The live pages before we release memory.
    gc_release_live_pages: usize,
    /// The live pages at the GC end
    gc_end_live_pages: usize,
}

impl std::default::Default for MemBalancerStats {
    fn default() -> Self {
        let now = Instant::now();
        Self {
            allocation_pages_prev: None,
            allocation_time_prev: None,
            collection_pages_prev: None,
            collection_time_prev: None,
            allocation_pages: 0f64,
            allocation_time: 0f64,
            collection_pages: 0f64,
            collection_time: 0f64,
            gc_start_time: now,
            gc_end_time: now,
            gc_release_live_pages: 0,
            gc_end_live_pages: 0,
        }
    }
}

use crate::plan::GenerationalPlan;

impl MemBalancerStats {
    // Collect mem stats for generational plans:
    // * We ignore nursery GCs.
    // * allocation = objects in mature space = promoted + pre-tenured = live pages in mature space before release - live pages at the end of the last mature GC
    // * collection = live pages in mature space at the end of GC - live pages in mature space before release

    fn generational_mem_stats_on_gc_start<VM: VMBinding>(
        &mut self,
        _plan: &dyn GenerationalPlan<VM = VM>,
    ) {
        // We don't need to do anything
    }
    fn generational_mem_stats_on_gc_release<VM: VMBinding>(
        &mut self,
        plan: &dyn GenerationalPlan<VM = VM>,
    ) {
        if !plan.is_current_gc_nursery() {
            self.gc_release_live_pages = plan.get_mature_reserved_pages();

            // Calculate the promoted pages (including pre-tenured objects)
            let promoted = self
                .gc_release_live_pages
                .saturating_sub(self.gc_end_live_pages);
            self.allocation_pages = promoted as f64;
            trace!(
                "promoted = mature live before release {} - mature live at prev gc end {} = {}",
                self.gc_release_live_pages,
                self.gc_end_live_pages,
                promoted
            );
            trace!(
                "allocated pages (accumulated to) = {}",
                self.allocation_pages
            );
        }
    }
    /// Return true if we should compute a new heap limit. Only do so at the end of a mature GC.
    fn generational_mem_stats_on_gc_end<VM: VMBinding>(
        &mut self,
        plan: &dyn GenerationalPlan<VM = VM>,
    ) -> bool {
        if !plan.is_current_gc_nursery() {
            self.gc_end_live_pages = plan.get_mature_reserved_pages();
            // Use live pages as an estimate for pages traversed during GC
            self.collection_pages = self.gc_end_live_pages as f64;
            trace!(
                "collected pages (estimated) = {}; mature live at gc release = {}, mature live at gc end = {}",
                self.collection_pages,
                self.gc_release_live_pages,
                self.gc_end_live_pages
            );
            true
        } else {
            false
        }
    }

    // Collect mem stats for non-generational plans:
    // * allocation = live pages at the start of GC - live pages at the end of the last GC
    // * collection = live pages at the end of GC - live pages before release

    fn non_generational_mem_stats_on_gc_start<VM: VMBinding>(&mut self, mmtk: &'static MMTK<VM>) {
        self.allocation_pages = mmtk
            .get_plan()
            .get_reserved_pages()
            .saturating_sub(self.gc_end_live_pages) as f64;
        trace!(
            "allocated pages = used {} - live in last gc {} = {}",
            mmtk.get_plan().get_reserved_pages(),
            self.gc_end_live_pages,
            self.allocation_pages
        );
    }
    fn non_generational_mem_stats_on_gc_release<VM: VMBinding>(&mut self, mmtk: &'static MMTK<VM>) {
        self.gc_release_live_pages = mmtk.get_plan().get_reserved_pages();
        trace!("live before release = {}", self.gc_release_live_pages);
    }
    fn non_generational_mem_stats_on_gc_end<VM: VMBinding>(&mut self, mmtk: &'static MMTK<VM>) {
        self.gc_end_live_pages = mmtk.get_plan().get_reserved_pages();
        trace!("live pages = {}", self.gc_end_live_pages);
        // Use live pages as an estimate for pages traversed during GC
        self.collection_pages = self.gc_end_live_pages as f64;
        trace!(
            "collected pages (estimated) = {}; live at gc release = {}, live at gc end = {}",
            self.collection_pages,
            self.gc_release_live_pages,
            self.gc_end_live_pages
        );
    }
}

impl<VM: VMBinding> GCTriggerPolicy<VM> for MemBalancerTrigger {
    fn is_gc_required(
        &self,
        space_full: bool,
        space: Option<SpaceStats<VM>>,
        plan: &dyn Plan<VM = VM>,
    ) -> bool {
        // Let the plan decide
        plan.collection_required(space_full, space)
    }

    fn on_pending_allocation(&self, pages: usize) {
        self.pending_pages.fetch_add(pages, Ordering::SeqCst);
    }

    fn on_gc_start(&self, mmtk: &'static MMTK<VM>) {
        trace!("=== on_gc_start ===");
        self.access_stats(|stats| {
            stats.gc_start_time = Instant::now();
            stats.allocation_time += (stats.gc_start_time - stats.gc_end_time).as_secs_f64();
            trace!(
                "gc_start = {:?}, allocation_time = {}",
                stats.gc_start_time,
                stats.allocation_time
            );

            if let Some(plan) = mmtk.get_plan().generational() {
                stats.generational_mem_stats_on_gc_start(plan);
            } else {
                stats.non_generational_mem_stats_on_gc_start(mmtk);
            }
        });
    }

    fn on_gc_release(&self, mmtk: &'static MMTK<VM>) {
        trace!("=== on_gc_release ===");
        self.access_stats(|stats| {
            if let Some(plan) = mmtk.get_plan().generational() {
                stats.generational_mem_stats_on_gc_release(plan);
            } else {
                stats.non_generational_mem_stats_on_gc_release(mmtk);
            }
        });
    }

    fn on_gc_end(&self, mmtk: &'static MMTK<VM>) {
        trace!("=== on_gc_end ===");
        self.access_stats(|stats| {
            stats.gc_end_time = Instant::now();
            stats.collection_time += (stats.gc_end_time - stats.gc_start_time).as_secs_f64();
            trace!(
                "gc_end = {:?}, collection_time = {}",
                stats.gc_end_time,
                stats.collection_time
            );

            if let Some(plan) = mmtk.get_plan().generational() {
                if stats.generational_mem_stats_on_gc_end(plan) {
                    self.compute_new_heap_limit(
                        mmtk.get_plan().get_reserved_pages(),
                        // We reserve an extra min nursery. This ensures that we will not trigger
                        // a full heap GC in the next GC (if the available pages are fewer than the
                        // min nursery, we will force a full heap GC).
                        mmtk.get_plan().get_collection_reserved_pages()
                            + mmtk.gc_trigger.get_min_nursery_pages(),
                        stats,
                    );
                }
            } else {
                stats.non_generational_mem_stats_on_gc_end(mmtk);
                self.compute_new_heap_limit(
                    mmtk.get_plan().get_reserved_pages(),
                    mmtk.get_plan().get_collection_reserved_pages(),
                    stats,
                );
            }
        });
        // Clear pending allocation pages at the end of GC, whether we used them or not.
        self.pending_pages.store(0, Ordering::SeqCst);
    }

    fn is_heap_full(&self, plan: &dyn Plan<VM = VM>) -> bool {
        // If the reserved pages exceed the current heap size, the heap is full.
        plan.get_reserved_pages() > self.current_heap_pages.load(Ordering::Relaxed)
    }

    fn get_current_heap_size_in_pages(&self) -> usize {
        self.current_heap_pages.load(Ordering::Relaxed)
    }

    fn get_max_heap_size_in_pages(&self) -> usize {
        self.max_heap_pages
    }

    fn can_heap_size_grow(&self) -> bool {
        self.current_heap_pages.load(Ordering::Relaxed) < self.max_heap_pages
    }
}
impl MemBalancerTrigger {
    fn new(min_heap_pages: usize, max_heap_pages: usize) -> Self {
        Self {
            min_heap_pages,
            max_heap_pages,
            pending_pages: AtomicUsize::new(0),
            // start with min heap
            current_heap_pages: AtomicUsize::new(min_heap_pages),
            stats: AtomicRefCell::new(Default::default()),
        }
    }

    fn access_stats<F>(&self, mut f: F)
    where
        F: FnMut(&mut MemBalancerStats),
    {
        let mut stats = self.stats.borrow_mut();
        f(&mut stats);
    }

    fn compute_new_heap_limit(
        &self,
        live: usize,
        extra_reserve: usize,
        stats: &mut MemBalancerStats,
    ) {
        trace!("compute new heap limit: {:?}", stats);

        // Constants from the original paper
        const ALLOCATION_SMOOTH_FACTOR: f64 = 0.95;
        const COLLECTION_SMOOTH_FACTOR: f64 = 0.5;
        const TUNING_FACTOR: f64 = 0.2;

        // Smooth memory/time for allocation/collection
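        // Exponentially weighted moving average: blend the previous value with the current one
        // as `prev * factor + cur * (1 - factor)`; if there is no previous value, use the current one.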
        let smooth = |prev: Option<f64>, cur, factor| {
            prev.map(|p| p * factor + cur * (1.0f64 - factor))
                .unwrap_or(cur)
        };
        let alloc_mem = smooth(
            stats.allocation_pages_prev,
            stats.allocation_pages,
            ALLOCATION_SMOOTH_FACTOR,
        );
        let alloc_time = smooth(
            stats.allocation_time_prev,
            stats.allocation_time,
            ALLOCATION_SMOOTH_FACTOR,
        );
        let gc_mem = smooth(
            stats.collection_pages_prev,
            stats.collection_pages,
            COLLECTION_SMOOTH_FACTOR,
        );
        let gc_time = smooth(
            stats.collection_time_prev,
            stats.collection_time,
            COLLECTION_SMOOTH_FACTOR,
        );
        trace!(
            "after smoothing, alloc mem = {}, alloc_time = {}",
            alloc_mem,
            alloc_time
        );
        trace!(
            "after smoothing, gc mem    = {}, gc_time    = {}",
            gc_mem,
            gc_time
        );

        // We got the smoothed stats. Now save the current stats as previous stats
        stats.allocation_pages_prev = Some(stats.allocation_pages);
        stats.allocation_pages = 0f64;
        stats.allocation_time_prev = Some(stats.allocation_time);
        stats.allocation_time = 0f64;
        stats.collection_pages_prev = Some(stats.collection_pages);
        stats.collection_pages = 0f64;
        stats.collection_time_prev = Some(stats.collection_time);
        stats.collection_time = 0f64;

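        // The computation below follows MemBalancer's square-root rule (see the paper linked in
        // the type-level doc): E = sqrt(live * (alloc_mem / alloc_time) / (TUNING_FACTOR * (gc_mem / gc_time))),
        // and the heap limit becomes live + E (plus the extra reserve and pending allocation pages).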
        // Calculate the square root
        let e: f64 = if alloc_mem != 0f64 && gc_mem != 0f64 && alloc_time != 0f64 && gc_time != 0f64
        {
            let mut e = live as f64;
            e *= alloc_mem / alloc_time;
            e /= TUNING_FACTOR;
            e /= gc_mem / gc_time;
            e.sqrt()
        } else {
            // If any collected stat is abnormal, we use the fallback heuristics.
            (live as f64 * 4096f64).sqrt()
        };

        // Get pending allocations
        let pending_pages = self.pending_pages.load(Ordering::SeqCst);

        // This is the optimal heap limit according to MemBalancer. We will need to clamp the value to the defined min/max range.
        let optimal_heap = live + e as usize + extra_reserve + pending_pages;
        trace!(
            "optimal = live {} + sqrt term {} + extra {} + pending {}",
            live,
            e,
            extra_reserve,
            pending_pages
        );

        // The new heap size must be within min/max.
        let new_heap = optimal_heap.clamp(self.min_heap_pages, self.max_heap_pages);
        debug!(
            "MemBalancer: new heap limit = {} pages (optimal = {}, clamped to [{}, {}])",
            new_heap, optimal_heap, self.min_heap_pages, self.max_heap_pages
        );
        self.current_heap_pages.store(new_heap, Ordering::Relaxed);
    }
}
731}