mmtk/plan/marksweep/global.rs

use crate::plan::global::BasePlan;
use crate::plan::global::CommonPlan;
use crate::plan::global::CreateGeneralPlanArgs;
use crate::plan::global::CreateSpecificPlanArgs;
use crate::plan::marksweep::gc_work::MSGCWorkContext;
use crate::plan::marksweep::mutator::ALLOCATOR_MAPPING;
use crate::plan::AllocationSemantics;
use crate::plan::Plan;
use crate::plan::PlanConstraints;
use crate::policy::space::Space;
use crate::scheduler::GCWorkScheduler;
use crate::util::alloc::allocators::AllocatorSelector;
use crate::util::heap::gc_trigger::SpaceStats;
use crate::util::heap::VMRequest;
use crate::util::metadata::side_metadata::SideMetadataContext;
use crate::util::VMWorkerThread;
use crate::vm::VMBinding;
use enum_map::EnumMap;
use mmtk_macros::{HasSpaces, PlanTraceObject};

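// `MarkSweepSpace` is feature-gated: with `malloc_mark_sweep` it is the malloc-backed
// space; otherwise it is MMTk's native mark-sweep space. `MAX_OBJECT_SIZE` is taken
// from whichever space is selected.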
#[cfg(feature = "malloc_mark_sweep")]
pub type MarkSweepSpace<VM> = crate::policy::marksweepspace::malloc_ms::MallocSpace<VM>;
#[cfg(feature = "malloc_mark_sweep")]
use crate::policy::marksweepspace::malloc_ms::MAX_OBJECT_SIZE;

#[cfg(not(feature = "malloc_mark_sweep"))]
pub type MarkSweepSpace<VM> = crate::policy::marksweepspace::native_ms::MarkSweepSpace<VM>;
#[cfg(not(feature = "malloc_mark_sweep"))]
use crate::policy::marksweepspace::native_ms::MAX_OBJECT_SIZE;

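/// A simple non-moving mark-sweep plan: objects are allocated into a single
/// `MarkSweepSpace`, alongside the common spaces provided by `CommonPlan`.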
#[derive(HasSpaces, PlanTraceObject)]
pub struct MarkSweep<VM: VMBinding> {
    #[parent]
    common: CommonPlan<VM>,
    #[space]
    ms: MarkSweepSpace<VM>,
}

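/// Constraints for the mark-sweep plan: it never moves objects, and the non-LOS
/// allocation cutoff is the selected space's `MAX_OBJECT_SIZE`. Mutators need a
/// prepare step when neither `malloc_mark_sweep` nor `eager_sweeping` is enabled
/// (the native space with lazy sweeping), on top of the default requirements.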
pub const MS_CONSTRAINTS: PlanConstraints = PlanConstraints {
    moves_objects: false,
    max_non_los_default_alloc_bytes: MAX_OBJECT_SIZE,
    may_trace_duplicate_edges: true,
    needs_prepare_mutator: (!cfg!(feature = "malloc_mark_sweep")
        && !cfg!(feature = "eager_sweeping"))
        || PlanConstraints::default().needs_prepare_mutator,
    ..PlanConstraints::default()
};

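// The `Plan` implementation schedules the common mark-sweep work packets and forwards
// prepare/release/end-of-gc to both the common plan and the mark-sweep space.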
impl<VM: VMBinding> Plan for MarkSweep<VM> {
    fn schedule_collection(&'static self, scheduler: &GCWorkScheduler<VM>) {
        scheduler.schedule_common_work::<MSGCWorkContext<VM>>(self);
    }

    fn get_allocator_mapping(&self) -> &'static EnumMap<AllocationSemantics, AllocatorSelector> {
        &ALLOCATOR_MAPPING
    }

    fn prepare(&mut self, tls: VMWorkerThread) {
        self.common.prepare(tls, true);
        self.ms.prepare(true);
    }

    fn release(&mut self, tls: VMWorkerThread) {
        self.ms.release();
        self.common.release(tls, true);
    }

    fn end_of_gc(&mut self, tls: VMWorkerThread) {
        self.ms.end_of_gc();
        self.common.end_of_gc(tls);
    }

    fn collection_required(&self, space_full: bool, _space: Option<SpaceStats<Self::VM>>) -> bool {
        self.base().collection_required(self, space_full)
    }

    fn current_gc_may_move_object(&self) -> bool {
        false
    }

    fn get_used_pages(&self) -> usize {
        self.common.get_used_pages() + self.ms.reserved_pages()
    }

    fn base(&self) -> &BasePlan<VM> {
        &self.common.base
    }

    fn base_mut(&mut self) -> &mut BasePlan<Self::VM> {
        &mut self.common.base
    }

    fn common(&self) -> &CommonPlan<VM> {
        &self.common
    }

    fn constraints(&self) -> &'static PlanConstraints {
        &MS_CONSTRAINTS
    }
}

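// Construction: register the space's global side metadata specs, build the mark-sweep
// space and the common plan, then verify side metadata sanity.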
impl<VM: VMBinding> MarkSweep<VM> {
    pub fn new(args: CreateGeneralPlanArgs<VM>) -> Self {
        let mut global_side_metadata_specs = SideMetadataContext::new_global_specs(&[]);
        MarkSweepSpace::<VM>::extend_global_side_metadata_specs(&mut global_side_metadata_specs);

        let mut plan_args = CreateSpecificPlanArgs {
            global_args: args,
            constraints: &MS_CONSTRAINTS,
            global_side_metadata_specs,
        };

        let res = MarkSweep {
            ms: MarkSweepSpace::new(plan_args.get_normal_space_args(
                "ms",
                true,
                false,
                VMRequest::discontiguous(),
            )),
            common: CommonPlan::new(plan_args),
        };

        res.verify_side_metadata_sanity();

        res
    }

    pub fn ms_space(&self) -> &MarkSweepSpace<VM> {
        &self.ms
    }
}