mmtk/plan/markcompact/global.rs

use super::gc_work::MarkCompactGCWorkContext;
use super::gc_work::{
    CalculateForwardingAddress, Compact, ForwardingProcessEdges, MarkingProcessEdges,
    UpdateReferences,
};
use crate::plan::global::CommonPlan;
use crate::plan::global::{BasePlan, CreateGeneralPlanArgs, CreateSpecificPlanArgs};
use crate::plan::markcompact::mutator::ALLOCATOR_MAPPING;
use crate::plan::AllocationSemantics;
use crate::plan::Plan;
use crate::plan::PlanConstraints;
use crate::policy::markcompactspace::MarkCompactSpace;
use crate::policy::space::Space;
use crate::scheduler::gc_work::*;
use crate::scheduler::*;
use crate::util::alloc::allocators::AllocatorSelector;
use crate::util::copy::CopySemantics;
use crate::util::heap::gc_trigger::SpaceStats;
use crate::util::heap::VMRequest;
use crate::util::metadata::side_metadata::SideMetadataContext;
#[cfg(not(feature = "vo_bit"))]
use crate::util::metadata::vo_bit::VO_BIT_SIDE_METADATA_SPEC;
use crate::util::opaque_pointer::*;
use crate::vm::VMBinding;

use enum_map::EnumMap;

use mmtk_macros::{HasSpaces, PlanTraceObject};

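/// A stop-the-world plan that collects the whole heap by marking live
/// objects, calculating forwarding addresses for them, and then compacting
/// them within a single [`MarkCompactSpace`]. Objects allocated with
/// non-default semantics live in the spaces provided by [`CommonPlan`].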
#[derive(HasSpaces, PlanTraceObject)]
pub struct MarkCompact<VM: VMBinding> {
    #[space]
    #[copy_semantics(CopySemantics::DefaultCopy)]
    pub mc_space: MarkCompactSpace<VM>,
    #[parent]
    pub common: CommonPlan<VM>,
}

/// The plan constraints for the mark-compact plan.
pub const MARKCOMPACT_CONSTRAINTS: PlanConstraints = PlanConstraints {
    moves_objects: true,
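    // Mark-compact determines forwarding addresses in a separate pass after
    // the marking (liveness) trace completes, rather than during tracing.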
    needs_forward_after_liveness: true,
    max_non_los_default_alloc_bytes:
        crate::plan::plan_constraints::MAX_NON_LOS_ALLOC_BYTES_COPYING_PLAN,
    ..PlanConstraints::default()
};

impl<VM: VMBinding> Plan for MarkCompact<VM> {
    fn constraints(&self) -> &'static PlanConstraints {
        &MARKCOMPACT_CONSTRAINTS
    }

    fn base(&self) -> &BasePlan<VM> {
        &self.common.base
    }

    fn base_mut(&mut self) -> &mut BasePlan<Self::VM> {
        &mut self.common.base
    }

    fn common(&self) -> &CommonPlan<VM> {
        &self.common
    }

    fn prepare(&mut self, _tls: VMWorkerThread) {
        self.common.prepare(_tls, true);
        self.mc_space.prepare();
    }

    fn release(&mut self, _tls: VMWorkerThread) {
        self.common.release(_tls, true);
        self.mc_space.release();
    }

    fn end_of_gc(&mut self, tls: VMWorkerThread) {
        self.common.end_of_gc(tls);
    }

    fn get_allocator_mapping(&self) -> &'static EnumMap<AllocationSemantics, AllocatorSelector> {
        &ALLOCATOR_MAPPING
    }

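    // Work bucket stages are opened in order, so the packets below run
    // roughly as: stop mutators and scan roots, mark (transitive closure via
    // MarkingProcessEdges), calculate forwarding addresses, trace again to
    // update references, compact, then release.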
    fn schedule_collection(&'static self, scheduler: &GCWorkScheduler<VM>) {
        // TODO use schedule_common once it can work with markcompact
        // self.common()
        //     .schedule_common::<Self, MarkingProcessEdges<VM>, NoCopy<VM>>(
        //         self,
        //         &MARKCOMPACT_CONSTRAINTS,
        //         scheduler,
        //     );

        // Stop & scan mutators (mutator scanning can happen before STW)
        scheduler.work_buckets[WorkBucketStage::Unconstrained]
            .add(StopMutators::<MarkCompactGCWorkContext<VM>>::new());

        // Prepare global/collectors/mutators
        scheduler.work_buckets[WorkBucketStage::Prepare]
            .add(Prepare::<MarkCompactGCWorkContext<VM>>::new(self));

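        // Once the marking closure is complete, compute the address that each
        // live object will occupy after compaction.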
        scheduler.work_buckets[WorkBucketStage::CalculateForwarding]
            .add(CalculateForwardingAddress::<VM>::new(&self.mc_space));
        // do another trace to update references
        scheduler.work_buckets[WorkBucketStage::SecondRoots].add(UpdateReferences::<VM>::new(self));
        scheduler.work_buckets[WorkBucketStage::Compact].add(Compact::<VM>::new(&self.mc_space));

        // Release global/collectors/mutators
        scheduler.work_buckets[WorkBucketStage::Release]
            .add(Release::<MarkCompactGCWorkContext<VM>>::new(self));

        // Reference processing
        if !*self.base().options.no_reference_types {
            use crate::util::reference_processor::{
                PhantomRefProcessing, SoftRefProcessing, WeakRefProcessing,
            };
            scheduler.work_buckets[WorkBucketStage::SoftRefClosure]
                .add(SoftRefProcessing::<MarkingProcessEdges<VM>>::new());
            scheduler.work_buckets[WorkBucketStage::WeakRefClosure]
                .add(WeakRefProcessing::<VM>::new());
            scheduler.work_buckets[WorkBucketStage::PhantomRefClosure]
                .add(PhantomRefProcessing::<VM>::new());

            use crate::util::reference_processor::RefForwarding;
            scheduler.work_buckets[WorkBucketStage::RefForwarding]
                .add(RefForwarding::<ForwardingProcessEdges<VM>>::new());

            use crate::util::reference_processor::RefEnqueue;
            scheduler.work_buckets[WorkBucketStage::Release].add(RefEnqueue::<VM>::new());
        }

        // Finalization
        if !*self.base().options.no_finalizer {
            use crate::util::finalizable_processor::{Finalization, ForwardFinalization};
            // finalization
            // treat finalizable objects as roots and perform a closure (marking)
            // must be done before calculating forwarding pointers
            scheduler.work_buckets[WorkBucketStage::FinalRefClosure]
                .add(Finalization::<MarkingProcessEdges<VM>>::new());
            // update finalizable object references
            // must be done before compacting
            scheduler.work_buckets[WorkBucketStage::FinalizableForwarding]
                .add(ForwardFinalization::<ForwardingProcessEdges<VM>>::new());
        }

        // VM-specific weak ref processing
        scheduler.work_buckets[WorkBucketStage::VMRefClosure]
            .set_sentinel(Box::new(VMProcessWeakRefs::<MarkingProcessEdges<VM>>::new()));

        // VM-specific weak ref forwarding
        scheduler.work_buckets[WorkBucketStage::VMRefForwarding]
            .add(VMForwardWeakRefs::<ForwardingProcessEdges<VM>>::new());

        // VM-specific work after forwarding, e.g. to implement reference enqueueing.
        scheduler.work_buckets[WorkBucketStage::Release].add(VMPostForwarding::<VM>::default());

        // Analysis GC work
        #[cfg(feature = "analysis")]
        {
            use crate::util::analysis::GcHookWork;
            scheduler.work_buckets[WorkBucketStage::Unconstrained].add(GcHookWork);
        }
        #[cfg(feature = "sanity")]
        scheduler.work_buckets[WorkBucketStage::Final]
            .add(crate::util::sanity::sanity_checker::ScheduleSanityGC::<Self>::new(self));
    }

    fn collection_required(&self, space_full: bool, _space: Option<SpaceStats<Self::VM>>) -> bool {
        self.base().collection_required(self, space_full)
    }

    fn get_used_pages(&self) -> usize {
        self.mc_space.reserved_pages() + self.common.get_used_pages()
    }

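    // Mark-compact moves objects within its own space instead of copying them
    // to a reserved to-space, so no pages are reserved for collection.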
    fn get_collection_reserved_pages(&self) -> usize {
        0
    }

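    // Every mark-compact collection compacts the heap, so objects may move.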
    fn current_gc_may_move_object(&self) -> bool {
        true
    }
}

impl<VM: VMBinding> MarkCompact<VM> {
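    /// Create a mark-compact plan from the general plan arguments, setting up
    /// the mark-compact space alongside the common plan spaces.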
    pub fn new(args: CreateGeneralPlanArgs<VM>) -> Self {
        // if vo_bit is enabled, VO_BIT_SIDE_METADATA_SPEC will be added to
        // SideMetadataContext by default, so we don't need to add it here.
        #[cfg(feature = "vo_bit")]
        let global_side_metadata_specs = SideMetadataContext::new_global_specs(&[]);
        // if vo_bit is NOT enabled,
        // we need to add VO_BIT_SIDE_METADATA_SPEC to SideMetadataContext here.
        #[cfg(not(feature = "vo_bit"))]
        let global_side_metadata_specs =
            SideMetadataContext::new_global_specs(&[VO_BIT_SIDE_METADATA_SPEC]);

        let mut plan_args = CreateSpecificPlanArgs {
            global_args: args,
            constraints: &MARKCOMPACT_CONSTRAINTS,
            global_side_metadata_specs,
        };

        let mc_space = MarkCompactSpace::new(plan_args.get_normal_space_args(
            "mc",
            true,
            false,
            VMRequest::discontiguous(),
        ));

        let res = MarkCompact {
            mc_space,
            common: CommonPlan::new(plan_args),
        };

        res.verify_side_metadata_sanity();

        res
    }
}

impl<VM: VMBinding> MarkCompact<VM> {
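    /// Get a reference to the mark-compact space.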
    pub fn mc_space(&self) -> &MarkCompactSpace<VM> {
        &self.mc_space
    }
}