// mmtk/plan/compressor/global.rs

1use super::gc_work::CompressorWorkContext;
2use super::gc_work::{
3    AfterCompact, ForwardingProcessEdges, GenerateWork, MarkingProcessEdges, UpdateReferences,
4};
5use crate::plan::compressor::mutator::ALLOCATOR_MAPPING;
6use crate::plan::global::CreateGeneralPlanArgs;
7use crate::plan::global::CreateSpecificPlanArgs;
8use crate::plan::global::{BasePlan, CommonPlan};
9use crate::plan::plan_constraints::MAX_NON_LOS_ALLOC_BYTES_COPYING_PLAN;
10use crate::plan::{AllocationSemantics, Plan, PlanConstraints};
11use crate::policy::compressor::CompressorSpace;
12use crate::policy::space::Space;
13use crate::scheduler::gc_work::*;
14use crate::scheduler::{GCWorkScheduler, WorkBucketStage};
15use crate::util::alloc::allocators::AllocatorSelector;
16use crate::util::heap::gc_trigger::SpaceStats;
17#[allow(unused_imports)]
18use crate::util::heap::VMRequest;
19use crate::util::metadata::side_metadata::SideMetadataContext;
20use crate::util::opaque_pointer::*;
21use crate::vm::VMBinding;
22use enum_map::EnumMap;
23use mmtk_macros::{HasSpaces, PlanTraceObject};
24
/// [`Compressor`] implements a stop-the-world and parallel implementation of
/// the Compressor, as described in Kermany and Petrank,
/// [The Compressor: concurrent, incremental, and parallel compaction](https://dl.acm.org/doi/10.1145/1133255.1134023).
#[derive(HasSpaces, PlanTraceObject)]
pub struct Compressor<VM: VMBinding> {
    /// Plan state shared with all other plans (base plan, large object space,
    /// immortal space, etc.); `common.los` is released separately after
    /// compaction (see `AfterCompact` in `schedule_collection`).
    #[parent]
    pub common: CommonPlan<VM>,
    /// The space in which the Compressor allocates and compacts objects.
    #[space]
    pub compressor_space: CompressorSpace<VM>,
}
35
36/// The plan constraints for the Compressor plan.
37pub const COMPRESSOR_CONSTRAINTS: PlanConstraints = PlanConstraints {
38    max_non_los_default_alloc_bytes: MAX_NON_LOS_ALLOC_BYTES_COPYING_PLAN,
39    moves_objects: true,
40    needs_forward_after_liveness: true,
41    ..PlanConstraints::default()
42};
43
impl<VM: VMBinding> Plan for Compressor<VM> {
    /// Return the static constraints for this plan.
    fn constraints(&self) -> &'static PlanConstraints {
        &COMPRESSOR_CONSTRAINTS
    }

    /// Decide whether a GC should be triggered; delegated to the base plan's
    /// heuristic. The per-space statistics are ignored by this plan.
    fn collection_required(&self, space_full: bool, _space: Option<SpaceStats<Self::VM>>) -> bool {
        self.base().collection_required(self, space_full)
    }

    fn common(&self) -> &CommonPlan<VM> {
        &self.common
    }

    fn base(&self) -> &BasePlan<VM> {
        &self.common.base
    }

    fn base_mut(&mut self) -> &mut BasePlan<Self::VM> {
        &mut self.common.base
    }

    /// Prepare the common spaces and the compressor space for a collection.
    // NOTE(review): the `true` flag presumably marks this as a full-heap
    // collection — confirm against CommonPlan::prepare.
    fn prepare(&mut self, tls: VMWorkerThread) {
        self.common.prepare(tls, true);
        self.compressor_space.prepare();
    }

    /// Release per-collection resources after compaction has finished.
    fn release(&mut self, tls: VMWorkerThread) {
        self.common.release(tls, true);
        self.compressor_space.release();
    }

    fn end_of_gc(&mut self, tls: VMWorkerThread) {
        self.common.end_of_gc(tls);
    }

    /// Map each allocation semantics to the allocator the mutator should use.
    fn get_allocator_mapping(&self) -> &'static EnumMap<AllocationSemantics, AllocatorSelector> {
        &ALLOCATOR_MAPPING
    }

    /// Build the full work-packet schedule for one Compressor collection.
    /// The schedule is: stop mutators and mark; compute forwarding data
    /// (offset vectors); update references (roots, weak refs, finalizables);
    /// compact; then release. Packets are keyed by `WorkBucketStage`, which
    /// enforces the inter-phase ordering.
    fn schedule_collection(&'static self, scheduler: &GCWorkScheduler<VM>) {
        // TODO use schedule_common once it can work with the Compressor
        // The main issue there is that we need to ForwardingProcessEdges
        // in FinalizableForwarding.

        // Stop & scan mutators (mutator scanning can happen before STW)
        scheduler.work_buckets[WorkBucketStage::Unconstrained]
            .add(StopMutators::<CompressorWorkContext<VM>>::new());

        // Prepare global/collectors/mutators
        scheduler.work_buckets[WorkBucketStage::Prepare]
            .add(Prepare::<CompressorWorkContext<VM>>::new(self));

        // After marking, compute the offset vectors that encode each live
        // object's forwarding address.
        scheduler.work_buckets[WorkBucketStage::CalculateForwarding].add(GenerateWork::new(
            &self.compressor_space,
            CompressorSpace::<VM>::add_offset_vector_tasks,
        ));

        // scan roots to update their references
        scheduler.work_buckets[WorkBucketStage::SecondRoots].add(UpdateReferences::<VM>::new());

        // Slide live objects to their forwarding addresses.
        scheduler.work_buckets[WorkBucketStage::Compact].add(GenerateWork::new(
            &self.compressor_space,
            CompressorSpace::<VM>::add_compact_tasks,
        ));

        // A sentinel runs only once every other packet in its bucket is done:
        // post-compaction fixup for the compressor space and the LOS.
        scheduler.work_buckets[WorkBucketStage::Compact].set_sentinel(Box::new(
            AfterCompact::<VM>::new(&self.compressor_space, &self.common.los),
        ));

        // Release global/collectors/mutators
        scheduler.work_buckets[WorkBucketStage::Release]
            .add(Release::<CompressorWorkContext<VM>>::new(self));

        // Reference processing (soft/weak/phantom), unless disabled by options.
        if !*self.base().options.no_reference_types {
            use crate::util::reference_processor::{
                PhantomRefProcessing, SoftRefProcessing, WeakRefProcessing,
            };
            // Soft refs may retain their referents, so they trace with the
            // marking edge processor; weak/phantom only clear dead referents.
            scheduler.work_buckets[WorkBucketStage::SoftRefClosure]
                .add(SoftRefProcessing::<MarkingProcessEdges<VM>>::new());
            scheduler.work_buckets[WorkBucketStage::WeakRefClosure]
                .add(WeakRefProcessing::<VM>::new());
            scheduler.work_buckets[WorkBucketStage::PhantomRefClosure]
                .add(PhantomRefProcessing::<VM>::new());

            use crate::util::reference_processor::RefForwarding;
            // Reference slots must be rewritten with forwarded addresses.
            scheduler.work_buckets[WorkBucketStage::RefForwarding]
                .add(RefForwarding::<ForwardingProcessEdges<VM>>::new());

            use crate::util::reference_processor::RefEnqueue;
            scheduler.work_buckets[WorkBucketStage::Release].add(RefEnqueue::<VM>::new());
        }

        // Finalization, unless disabled by options.
        if !*self.base().options.no_finalizer {
            use crate::util::finalizable_processor::{Finalization, ForwardFinalization};
            // finalization
            // treat finalizable objects as roots and perform a closure (marking)
            // must be done before calculating forwarding pointers
            scheduler.work_buckets[WorkBucketStage::FinalRefClosure]
                .add(Finalization::<MarkingProcessEdges<VM>>::new());
            // update finalizable object references
            // must be done before compacting
            scheduler.work_buckets[WorkBucketStage::FinalizableForwarding]
                .add(ForwardFinalization::<ForwardingProcessEdges<VM>>::new());
        }

        // VM-specific weak ref processing (sentinel: runs after the bucket's
        // other packets, allowing the binding to expand the transitive closure)
        scheduler.work_buckets[WorkBucketStage::VMRefClosure]
            .set_sentinel(Box::new(VMProcessWeakRefs::<MarkingProcessEdges<VM>>::new()));

        // VM-specific weak ref forwarding
        scheduler.work_buckets[WorkBucketStage::VMRefForwarding]
            .add(VMForwardWeakRefs::<ForwardingProcessEdges<VM>>::new());

        // VM-specific work after forwarding, possible to implement ref enqueuing.
        scheduler.work_buckets[WorkBucketStage::Release].add(VMPostForwarding::<VM>::default());

        // Analysis GC work
        #[cfg(feature = "analysis")]
        {
            use crate::util::analysis::GcHookWork;
            scheduler.work_buckets[WorkBucketStage::Unconstrained].add(GcHookWork);
        }
        #[cfg(feature = "sanity")]
        scheduler.work_buckets[WorkBucketStage::Final]
            .add(crate::util::sanity::sanity_checker::ScheduleSanityGC::<Self>::new(self));
    }

    /// Every Compressor collection compacts, so objects may always move.
    fn current_gc_may_move_object(&self) -> bool {
        true
    }

    /// Total pages in use: the compressor space plus all common/base spaces.
    fn get_used_pages(&self) -> usize {
        self.compressor_space.reserved_pages() + self.common.get_used_pages()
    }
}
181
182impl<VM: VMBinding> Compressor<VM> {
183    pub fn new(args: CreateGeneralPlanArgs<VM>) -> Self {
184        let mut plan_args = CreateSpecificPlanArgs {
185            global_args: args,
186            constraints: &COMPRESSOR_CONSTRAINTS,
187            global_side_metadata_specs: SideMetadataContext::new_global_specs(&[]),
188        };
189
190        let res = Compressor {
191            compressor_space: CompressorSpace::new(plan_args.get_normal_space_args(
192                "compressor_space",
193                true,
194                false,
195                VMRequest::discontiguous(),
196            )),
197            common: CommonPlan::new(plan_args),
198        };
199
200        res.verify_side_metadata_sanity();
201
202        res
203    }
204}