mmtk/plan/concurrent/immix/global.rs

use crate::plan::concurrent::concurrent_marking_work::ProcessRootSlots;
use crate::plan::concurrent::global::ConcurrentPlan;
use crate::plan::concurrent::immix::gc_work::ConcurrentImmixGCWorkContext;
use crate::plan::concurrent::immix::gc_work::ConcurrentImmixSTWGCWorkContext;
use crate::plan::concurrent::Pause;
use crate::plan::global::BasePlan;
use crate::plan::global::CommonPlan;
use crate::plan::global::CreateGeneralPlanArgs;
use crate::plan::global::CreateSpecificPlanArgs;
use crate::plan::immix::mutator::ALLOCATOR_MAPPING;
use crate::plan::AllocationSemantics;
use crate::plan::Plan;
use crate::plan::PlanConstraints;
use crate::policy::immix::defrag::StatsForDefrag;
use crate::policy::immix::ImmixSpaceArgs;
use crate::policy::immix::TRACE_KIND_DEFRAG;
use crate::policy::immix::TRACE_KIND_FAST;
use crate::policy::space::Space;
use crate::scheduler::gc_work::Release;
use crate::scheduler::gc_work::StopMutators;
use crate::scheduler::gc_work::UnsupportedProcessEdges;
use crate::scheduler::gc_work::VMProcessWeakRefs;
use crate::scheduler::*;
use crate::util::alloc::allocators::AllocatorSelector;
use crate::util::copy::*;
use crate::util::heap::gc_trigger::SpaceStats;
use crate::util::heap::VMRequest;
use crate::util::metadata::log_bit::UnlogBitsOperation;
use crate::util::metadata::side_metadata::SideMetadataContext;
use crate::vm::ObjectModel;
use crate::vm::VMBinding;
use crate::{policy::immix::ImmixSpace, util::opaque_pointer::VMWorkerThread};
use std::sync::atomic::AtomicBool;

use atomic::Atomic;
use atomic::Ordering;
use enum_map::EnumMap;

use mmtk_macros::{HasSpaces, PlanTraceObject};

/// A concurrent Immix plan. The plan supports concurrent collection (strictly non-moving) and STW full heap collection (which may do defrag).
/// The concurrent GC consists of two STW pauses (initial mark and final mark) with concurrent marking in between.
#[derive(HasSpaces, PlanTraceObject)]
pub struct ConcurrentImmix<VM: VMBinding> {
    #[post_scan]
    #[space]
    #[copy_semantics(CopySemantics::DefaultCopy)]
    pub immix_space: ImmixSpace<VM>,
    #[parent]
    pub common: CommonPlan<VM>,
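    // Bookkeeping state:
    // - `last_gc_was_defrag`: whether the last GC performed defragmentation.
    // - `current_pause`: the pause type of the GC currently in progress, if any.
    // - `previous_pause`: the pause type of the most recently completed pause.
    // - `should_do_full_gc`: set when the next collection should be a full STW GC.
    // - `concurrent_marking_active`: whether concurrent marking is currently in progress.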
    last_gc_was_defrag: AtomicBool,
    current_pause: Atomic<Option<Pause>>,
    previous_pause: Atomic<Option<Pause>>,
    should_do_full_gc: AtomicBool,
    concurrent_marking_active: AtomicBool,
}

/// The plan constraints for the concurrent immix plan.
pub const CONCURRENT_IMMIX_CONSTRAINTS: PlanConstraints = PlanConstraints {
    // If we disable moving in Immix, this is a non-moving plan.
    moves_objects: !cfg!(feature = "immix_non_moving"),
    // Max immix object size is half of a block.
    max_non_los_default_alloc_bytes: crate::policy::immix::MAX_IMMIX_OBJECT_SIZE,
    needs_prepare_mutator: true,
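    // The SATB barrier and the log bits it relies on are what make concurrent marking
    // possible: see the `UnlogBitsOperation` bulk set/clear in `prepare`/`release` below.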
    barrier: crate::BarrierSelector::SATBBarrier,
    needs_log_bit: true,
    ..PlanConstraints::default()
};

impl<VM: VMBinding> Plan for ConcurrentImmix<VM> {
    fn collection_required(&self, space_full: bool, _space: Option<SpaceStats<Self::VM>>) -> bool {
        if self.base().collection_required(self, space_full) {
            self.should_do_full_gc.store(true, Ordering::Release);
            info!("Triggering full GC");
            return true;
        }

        let concurrent_marking_in_progress = self.concurrent_marking_in_progress();

        if concurrent_marking_in_progress
            && self.common.base.scheduler.work_buckets[WorkBucketStage::Concurrent].is_drained()
        {
            // After the Concurrent bucket is drained during concurrent marking,
            // we trigger the FinalMark pause at the next poll() site (here).
            // FIXME: Immediately trigger FinalMark when the Concurrent bucket is drained.
            return true;
        }

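        // Heuristic for starting concurrent marking: once the pages allocated since the
        // last GC exceed half of the total heap pages, trigger an InitialMark pause.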
        let threshold = self.get_total_pages() >> 1;
        let used_pages_after_last_gc = self.common.base.global_state.get_used_pages_after_last_gc();
        let used_pages_now = self.get_used_pages();
        let allocated = used_pages_now.saturating_sub(used_pages_after_last_gc);
        if !concurrent_marking_in_progress && allocated > threshold {
            info!("Allocated {allocated} pages since last GC ({used_pages_now} - {used_pages_after_last_gc} > {threshold}): Do concurrent marking");
            debug_assert!(
                self.common.base.scheduler.work_buckets[WorkBucketStage::Concurrent].is_empty()
            );
            debug_assert!(!self.concurrent_marking_in_progress());
            debug_assert_ne!(self.previous_pause(), Some(Pause::InitialMark));
            return true;
        }
        false
    }

    fn last_collection_was_exhaustive(&self) -> bool {
        self.immix_space
            .is_last_gc_exhaustive(self.last_gc_was_defrag.load(Ordering::Relaxed))
    }

    fn constraints(&self) -> &'static PlanConstraints {
        &CONCURRENT_IMMIX_CONSTRAINTS
    }

    fn create_copy_config(&'static self) -> CopyConfig<Self::VM> {
        use enum_map::enum_map;
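        // Only `DefaultCopy` is used: defragmenting GCs copy objects within the immix space.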
        CopyConfig {
            copy_mapping: enum_map! {
                CopySemantics::DefaultCopy => CopySelector::Immix(0),
                _ => CopySelector::Unused,
            },
            space_mapping: vec![(CopySelector::Immix(0), &self.immix_space)],
            constraints: &CONCURRENT_IMMIX_CONSTRAINTS,
        }
    }

    fn schedule_collection(&'static self, scheduler: &GCWorkScheduler<VM>) {
        let pause = if self.concurrent_marking_in_progress() {
            // FIXME: Currently it is unsafe to bypass `FinalMark` and go directly from `InitialMark` to `Full`.
            // It is related to defragmentation. See https://github.com/mmtk/mmtk-core/issues/1357 for more details.
            // We currently force `FinalMark` to happen if the last pause is `InitialMark`.
            Pause::FinalMark
        } else if self.should_do_full_gc.load(Ordering::SeqCst) {
            Pause::Full
        } else {
            Pause::InitialMark
        };

        self.current_pause.store(Some(pause), Ordering::SeqCst);

        probe!(mmtk, concurrent_pause_determined, pause as usize);

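        // A concurrent GC cycle is InitialMark -> concurrent marking -> FinalMark;
        // a Full pause reuses the normal STW Immix full-heap schedule.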
        match pause {
            Pause::Full => {
                // The ref closure buckets are disabled by the initial mark pause, and need to be
                // re-enabled for a full GC before we reuse the normal Immix scheduling.
                self.set_ref_closure_buckets_enabled(true);
                crate::plan::immix::global::Immix::schedule_immix_full_heap_collection::<
                    ConcurrentImmix<VM>,
                    ConcurrentImmixSTWGCWorkContext<VM, TRACE_KIND_FAST>,
                    ConcurrentImmixSTWGCWorkContext<VM, TRACE_KIND_DEFRAG>,
                >(self, &self.immix_space, scheduler);
            }
            Pause::InitialMark => self.schedule_concurrent_marking_initial_pause(scheduler),
            Pause::FinalMark => self.schedule_concurrent_marking_final_pause(scheduler),
        }
    }

    fn get_allocator_mapping(&self) -> &'static EnumMap<AllocationSemantics, AllocatorSelector> {
        &ALLOCATOR_MAPPING
    }

    fn prepare(&mut self, tls: VMWorkerThread) {
        let pause = self.current_pause().unwrap();
        match pause {
            Pause::Full => {
                self.common.prepare(tls, true);
                self.immix_space.prepare(
                    true,
                    Some(StatsForDefrag::new(self)),
                    // Ignore unlog bits in full GCs because unlog bits should be all 0.
                    UnlogBitsOperation::NoOp,
                );
            }
            Pause::InitialMark => {
                self.immix_space.prepare(
                    true,
                    Some(StatsForDefrag::new(self)),
                    // Bulk set log bits so SATB barrier will be triggered on the existing objects.
                    UnlogBitsOperation::BulkSet,
                );

                self.common.prepare(tls, true);
                // Bulk set log bits so SATB barrier will be triggered on the existing objects.
                self.common
                    .schedule_unlog_bits_op(UnlogBitsOperation::BulkSet);
            }
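            // Nothing to prepare for FinalMark: the spaces were already prepared during
            // the InitialMark pause of this concurrent GC cycle.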
            Pause::FinalMark => (),
        }
    }

    fn release(&mut self, tls: VMWorkerThread) {
        let pause = self.current_pause().unwrap();
        match pause {
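            // Nothing to release after InitialMark: the cycle continues with concurrent
            // marking and releases at FinalMark.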
            Pause::InitialMark => (),
            Pause::Full | Pause::FinalMark => {
                self.immix_space.release(
                    true,
                    // Bulk clear log bits so SATB barrier will not be triggered.
                    UnlogBitsOperation::BulkClear,
                );

                self.common.release(tls, true);

                if pause == Pause::FinalMark {
                    // Bulk clear log bits so SATB barrier will not be triggered.
                    self.common
                        .schedule_unlog_bits_op(UnlogBitsOperation::BulkClear);
                } else {
                    // Full pauses didn't set unlog bits in the first place,
                    // so there is no need to clear them.
                    // TODO: Currently InitialMark must be followed by a FinalMark.
                    // If we allow upgrading a concurrent GC to a full STW GC,
                    // we will need to clear the unlog bits at an appropriate place.
                }
            }
        }
    }

    fn end_of_gc(&mut self, _tls: VMWorkerThread) {
        self.last_gc_was_defrag
            .store(self.immix_space.end_of_gc(), Ordering::Relaxed);

        let pause = self.current_pause().unwrap();
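        // Concurrent marking becomes active as the InitialMark pause ends, so the
        // queued marking work runs while mutators execute.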
        if pause == Pause::InitialMark {
            self.set_concurrent_marking_state(true);
        }
        self.previous_pause.store(Some(pause), Ordering::SeqCst);
        self.current_pause.store(None, Ordering::SeqCst);
        if pause != Pause::FinalMark {
            self.should_do_full_gc.store(false, Ordering::SeqCst);
        } else {
            // FIXME: Currently it is unsafe to trigger full GC during concurrent marking.
            // See `Self::schedule_collection`.
            // We keep the value of `self.should_do_full_gc` so that if full GC is triggered,
            // the next GC will be full GC.
        }
        info!("{:?} end", pause);
    }

    fn current_gc_may_move_object(&self) -> bool {
        self.immix_space.in_defrag()
    }

    fn get_collection_reserved_pages(&self) -> usize {
        self.immix_space.defrag_headroom_pages()
    }

    fn get_used_pages(&self) -> usize {
        self.immix_space.reserved_pages() + self.common.get_used_pages()
    }

    fn base(&self) -> &BasePlan<VM> {
        &self.common.base
    }

    fn base_mut(&mut self) -> &mut BasePlan<Self::VM> {
        &mut self.common.base
    }

    fn common(&self) -> &CommonPlan<VM> {
        &self.common
    }

    fn notify_mutators_paused(&self, _scheduler: &GCWorkScheduler<VM>) {
        use crate::vm::ActivePlan;
        let pause = self.current_pause().unwrap();
        match pause {
            Pause::Full => {
                self.set_concurrent_marking_state(false);
            }
            Pause::InitialMark => {
                debug_assert!(
                    !self.concurrent_marking_in_progress(),
                    "prev pause: {:?}",
                    self.previous_pause().unwrap()
                );
            }
            Pause::FinalMark => {
                debug_assert!(self.concurrent_marking_in_progress());
                // Flush barrier buffers
                for mutator in <VM as VMBinding>::VMActivePlan::mutators() {
                    mutator.barrier.flush();
                }
                self.set_concurrent_marking_state(false);
            }
        }
        info!("{:?} start", pause);
    }

    fn concurrent(&self) -> Option<&dyn ConcurrentPlan<VM = VM>> {
        Some(self)
    }
}

impl<VM: VMBinding> ConcurrentImmix<VM> {
    pub fn new(args: CreateGeneralPlanArgs<VM>) -> Self {
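        // The global log bit is needed for the SATB barrier (see `needs_log_bit` in the
        // plan constraints). If the binding keeps it in side metadata, it is registered
        // as a global side metadata spec here.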
        let spec = crate::util::metadata::extract_side_metadata(&[
            *VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC,
        ]);

        let mut plan_args = CreateSpecificPlanArgs {
            global_args: args,
            constraints: &CONCURRENT_IMMIX_CONSTRAINTS,
            global_side_metadata_specs: SideMetadataContext::new_global_specs(&spec),
        };

        let immix_args = ImmixSpaceArgs {
            mixed_age: false,
            never_move_objects: false,
        };

        // These buckets are not used in an Immix plan. We can simply disable them.
        // TODO: We should be more systematic about this, and disable unnecessary buckets for other plans as well.
        let scheduler = &plan_args.global_args.scheduler;
        scheduler.work_buckets[WorkBucketStage::VMRefForwarding].set_enabled(false);
        scheduler.work_buckets[WorkBucketStage::CalculateForwarding].set_enabled(false);
        scheduler.work_buckets[WorkBucketStage::SecondRoots].set_enabled(false);
        scheduler.work_buckets[WorkBucketStage::RefForwarding].set_enabled(false);
        scheduler.work_buckets[WorkBucketStage::FinalizableForwarding].set_enabled(false);
        scheduler.work_buckets[WorkBucketStage::Compact].set_enabled(false);

        let immix = ConcurrentImmix {
            immix_space: ImmixSpace::new(
                plan_args.get_normal_space_args("immix", true, false, VMRequest::discontiguous()),
                immix_args,
            ),
            common: CommonPlan::new(plan_args),
            last_gc_was_defrag: AtomicBool::new(false),
            current_pause: Atomic::new(None),
            previous_pause: Atomic::new(None),
            should_do_full_gc: AtomicBool::new(false),
            concurrent_marking_active: AtomicBool::new(false),
        };

        immix.verify_side_metadata_sanity();

        immix
    }

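    /// Enable or disable the work buckets used for reference and finalizer closures.
    /// They are disabled for the InitialMark pause and re-enabled for FinalMark and
    /// full GCs, where weak references and finalizers are actually processed.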
    fn set_ref_closure_buckets_enabled(&self, do_closure: bool) {
        let scheduler = &self.common.base.scheduler;
        scheduler.work_buckets[WorkBucketStage::VMRefClosure].set_enabled(do_closure);
        scheduler.work_buckets[WorkBucketStage::WeakRefClosure].set_enabled(do_closure);
        scheduler.work_buckets[WorkBucketStage::FinalRefClosure].set_enabled(do_closure);
        scheduler.work_buckets[WorkBucketStage::SoftRefClosure].set_enabled(do_closure);
        scheduler.work_buckets[WorkBucketStage::PhantomRefClosure].set_enabled(do_closure);
    }

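    /// Schedule the InitialMark pause: stop mutators, scan roots, and prepare the spaces
    /// for concurrent marking.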
    pub(crate) fn schedule_concurrent_marking_initial_pause(
        &'static self,
        scheduler: &GCWorkScheduler<VM>,
    ) {
        use crate::scheduler::gc_work::Prepare;

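        // Weak reference and finalizer processing is deferred to FinalMark (or a full GC),
        // so keep the ref closure buckets disabled during this pause.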
        self.set_ref_closure_buckets_enabled(false);

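        // Stop mutators and scan roots. `ProcessRootSlots` creates the marking work that
        // runs concurrently after this pause ends.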
        scheduler.work_buckets[WorkBucketStage::Unconstrained].add(StopMutators::<
            ConcurrentImmixGCWorkContext<ProcessRootSlots<VM, Self, TRACE_KIND_FAST>>,
        >::new());
        scheduler.work_buckets[WorkBucketStage::Prepare].add(Prepare::<
            ConcurrentImmixGCWorkContext<UnsupportedProcessEdges<VM>>,
        >::new(self));
    }

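    /// Schedule the FinalMark pause: stop mutators (without re-scanning roots), drain the
    /// remaining marking work, process weak references and finalizers, and release.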
    fn schedule_concurrent_marking_final_pause(&'static self, scheduler: &GCWorkScheduler<VM>) {
        self.set_ref_closure_buckets_enabled(true);

        // Skip root scanning in the final mark; roots were already scanned at InitialMark.
        scheduler.work_buckets[WorkBucketStage::Unconstrained].add(StopMutators::<
            ConcurrentImmixGCWorkContext<ProcessRootSlots<VM, Self, TRACE_KIND_FAST>>,
        >::new_no_scan_roots());

        scheduler.work_buckets[WorkBucketStage::Release].add(Release::<
            ConcurrentImmixGCWorkContext<UnsupportedProcessEdges<VM>>,
        >::new(self));

        // Deal with weak refs and finalizers
        // TODO: Check against schedule_common_work and see if we are still missing any work packets
        type RefProcessingEdges<VM> =
            crate::scheduler::gc_work::PlanProcessEdges<VM, ConcurrentImmix<VM>, TRACE_KIND_FAST>;
        // Reference processing
        if !*self.base().options.no_reference_types {
            use crate::util::reference_processor::{
                PhantomRefProcessing, SoftRefProcessing, WeakRefProcessing,
            };
            scheduler.work_buckets[WorkBucketStage::SoftRefClosure]
                .add(SoftRefProcessing::<RefProcessingEdges<VM>>::new());
            scheduler.work_buckets[WorkBucketStage::WeakRefClosure]
                .add(WeakRefProcessing::<VM>::new());
            scheduler.work_buckets[WorkBucketStage::PhantomRefClosure]
                .add(PhantomRefProcessing::<VM>::new());

            use crate::util::reference_processor::RefEnqueue;
            scheduler.work_buckets[WorkBucketStage::Release].add(RefEnqueue::<VM>::new());
        }

        // Finalization
        if !*self.base().options.no_finalizer {
            use crate::util::finalizable_processor::Finalization;
            scheduler.work_buckets[WorkBucketStage::FinalRefClosure]
                .add(Finalization::<RefProcessingEdges<VM>>::new());
        }

        // VM-specific weak ref processing
        // Note that ConcurrentImmix does not have a separate forwarding stage,
        // so we don't schedule the `VMForwardWeakRefs` work packet.
        scheduler.work_buckets[WorkBucketStage::VMRefClosure]
            .set_sentinel(Box::new(VMProcessWeakRefs::<RefProcessingEdges<VM>>::new()));
    }

    pub fn concurrent_marking_in_progress(&self) -> bool {
        self.concurrent_marking_active.load(Ordering::Acquire)
    }

    fn set_concurrent_marking_state(&self, active: bool) {
        use crate::plan::global::HasSpaces;

        // Tell the spaces to allocate new objects as live
        let allocate_object_as_live = active;
        self.for_each_space(&mut |space: &dyn Space<VM>| {
            space.set_allocate_as_live(allocate_object_as_live);
        });

        // Store the state.
        self.concurrent_marking_active
            .store(active, Ordering::SeqCst);

        // The SATB barrier is also activated/deactivated accordingly; that is done in
        // mutator prepare/release.
    }

    pub(super) fn is_concurrent_marking_active(&self) -> bool {
        self.concurrent_marking_active.load(Ordering::SeqCst)
    }

    fn previous_pause(&self) -> Option<Pause> {
        self.previous_pause.load(Ordering::SeqCst)
    }
}

impl<VM: VMBinding> ConcurrentPlan for ConcurrentImmix<VM> {
    fn current_pause(&self) -> Option<Pause> {
        self.current_pause.load(Ordering::SeqCst)
    }

    fn concurrent_work_in_progress(&self) -> bool {
        self.concurrent_marking_in_progress()
    }
}