mmtk/plan/immix/global.rs

use super::gc_work::ImmixGCWorkContext;
use super::mutator::ALLOCATOR_MAPPING;
use crate::plan::global::BasePlan;
use crate::plan::global::CommonPlan;
use crate::plan::global::CreateGeneralPlanArgs;
use crate::plan::global::CreateSpecificPlanArgs;
use crate::plan::AllocationSemantics;
use crate::plan::Plan;
use crate::plan::PlanConstraints;
use crate::policy::immix::ImmixSpaceArgs;
use crate::policy::immix::{TRACE_KIND_DEFRAG, TRACE_KIND_FAST};
use crate::policy::space::Space;
use crate::scheduler::*;
use crate::util::alloc::allocators::AllocatorSelector;
use crate::util::copy::*;
use crate::util::heap::gc_trigger::SpaceStats;
use crate::util::heap::VMRequest;
use crate::util::metadata::log_bit::UnlogBitsOperation;
use crate::util::metadata::side_metadata::SideMetadataContext;
use crate::vm::VMBinding;
use crate::{policy::immix::ImmixSpace, util::opaque_pointer::VMWorkerThread};
use std::sync::atomic::AtomicBool;

use atomic::Ordering;
use enum_map::EnumMap;

use mmtk_macros::{HasSpaces, PlanTraceObject};

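/// The Immix plan: a mark-region collector that marks live objects in an Immix space and
/// opportunistically defragments it by evacuating objects out of sparse blocks. Allocations
/// that do not use the default semantics are handled by the spaces in `CommonPlan`.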
#[derive(HasSpaces, PlanTraceObject)]
pub struct Immix<VM: VMBinding> {
    #[post_scan]
    #[space]
    #[copy_semantics(CopySemantics::DefaultCopy)]
    pub immix_space: ImmixSpace<VM>,
    #[parent]
    pub common: CommonPlan<VM>,
    last_gc_was_defrag: AtomicBool,
}

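/// Constraints for the Immix plan; only the fields that differ from
/// `PlanConstraints::default()` are overridden.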
pub const IMMIX_CONSTRAINTS: PlanConstraints = PlanConstraints {
    // Immix is a moving plan unless the `immix_non_moving` feature is enabled.
    moves_objects: !cfg!(feature = "immix_non_moving"),
    // Objects above this size are allocated in the large object space instead.
    max_non_los_default_alloc_bytes: crate::policy::immix::MAX_IMMIX_OBJECT_SIZE,
    ..PlanConstraints::default()
};

impl<VM: VMBinding> Plan for Immix<VM> {
    fn collection_required(&self, space_full: bool, _space: Option<SpaceStats<Self::VM>>) -> bool {
        self.base().collection_required(self, space_full)
    }

    fn last_collection_was_exhaustive(&self) -> bool {
        self.immix_space
            .is_last_gc_exhaustive(self.last_gc_was_defrag.load(Ordering::Relaxed))
    }

    fn constraints(&self) -> &'static PlanConstraints {
        &IMMIX_CONSTRAINTS
    }

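    // The only copy semantics this plan uses is `DefaultCopy`, which maps to the Immix
    // copy allocator backed by the Immix space.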
    fn create_copy_config(&'static self) -> CopyConfig<Self::VM> {
        use enum_map::enum_map;
        CopyConfig {
            copy_mapping: enum_map! {
                CopySemantics::DefaultCopy => CopySelector::Immix(0),
                _ => CopySelector::Unused,
            },
            space_mapping: vec![(CopySelector::Immix(0), &self.immix_space)],
            constraints: &IMMIX_CONSTRAINTS,
        }
    }

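    // Schedule a full-heap collection. Whether the fast or the defrag work context is used
    // is decided inside `schedule_immix_full_heap_collection`.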
    fn schedule_collection(&'static self, scheduler: &GCWorkScheduler<VM>) {
        Self::schedule_immix_full_heap_collection::<
            Immix<VM>,
            ImmixGCWorkContext<VM, TRACE_KIND_FAST>,
            ImmixGCWorkContext<VM, TRACE_KIND_DEFRAG>,
        >(self, &self.immix_space, scheduler)
    }

    fn get_allocator_mapping(&self) -> &'static EnumMap<AllocationSemantics, AllocatorSelector> {
        &ALLOCATOR_MAPPING
    }

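    // Plain Immix has no unlog bits to maintain, so prepare/release delegate to the
    // `*_inner` variants with `UnlogBitsOperation::NoOp`.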
    fn prepare(&mut self, tls: VMWorkerThread) {
        self.prepare_inner(tls, UnlogBitsOperation::NoOp)
    }

    fn release(&mut self, tls: VMWorkerThread) {
        self.release_inner(tls, UnlogBitsOperation::NoOp);
    }

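    // Record whether this GC defragmented the Immix space, then run the common end-of-GC work.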
    fn end_of_gc(&mut self, tls: VMWorkerThread) {
        self.last_gc_was_defrag
            .store(self.immix_space.end_of_gc(), Ordering::Relaxed);
        self.common.end_of_gc(tls);
    }

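    // Objects can only move while the Immix space is defragmenting.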
    fn current_gc_may_move_object(&self) -> bool {
        self.immix_space.in_defrag()
    }

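    // The defrag headroom is kept in reserve so a defragmenting GC always has free blocks
    // to evacuate objects into.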
    fn get_collection_reserved_pages(&self) -> usize {
        self.immix_space.defrag_headroom_pages()
    }

    fn get_used_pages(&self) -> usize {
        self.immix_space.reserved_pages() + self.common.get_used_pages()
    }

    fn base(&self) -> &BasePlan<VM> {
        &self.common.base
    }

    fn base_mut(&mut self) -> &mut BasePlan<Self::VM> {
        &mut self.common.base
    }

    fn common(&self) -> &CommonPlan<VM> {
        &self.common
    }
}

impl<VM: VMBinding> Immix<VM> {
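    /// Create an Immix plan with the default space arguments: no mixed-age space, and
    /// object movement (defragmentation) allowed.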
    pub fn new(args: CreateGeneralPlanArgs<VM>) -> Self {
        let plan_args = CreateSpecificPlanArgs {
            global_args: args,
            constraints: &IMMIX_CONSTRAINTS,
            global_side_metadata_specs: SideMetadataContext::new_global_specs(&[]),
        };
        Self::new_with_args(
            plan_args,
            ImmixSpaceArgs {
                mixed_age: false,
                never_move_objects: false,
            },
        )
    }

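    /// Create an Immix plan from explicit space arguments, so Immix-based plans in this
    /// crate can construct it with their own `ImmixSpaceArgs`.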
    pub fn new_with_args(
        mut plan_args: CreateSpecificPlanArgs<VM>,
        space_args: ImmixSpaceArgs,
    ) -> Self {
        let immix = Immix {
            immix_space: ImmixSpace::new(
                if space_args.mixed_age {
                    plan_args.get_mixed_age_space_args(
                        "immix",
                        true,
                        false,
                        VMRequest::discontiguous(),
                    )
                } else {
                    plan_args.get_normal_space_args(
                        "immix",
                        true,
                        false,
                        VMRequest::discontiguous(),
                    )
                },
                space_args,
            ),
            common: CommonPlan::new(plan_args),
            last_gc_was_defrag: AtomicBool::new(false),
        };

        immix.verify_side_metadata_sanity();

        immix
    }

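    /// Schedule a full-heap Immix collection. The Immix space decides whether this GC should
    /// defragment; based on that decision, the common work packets are scheduled with either
    /// the defrag or the fast GC work context. Generic over the plan and work-context types
    /// so that other Immix-based plans can share it.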
    pub(crate) fn schedule_immix_full_heap_collection<
        PlanType: Plan<VM = VM>,
        FastContext: GCWorkContext<VM = VM, PlanType = PlanType>,
        DefragContext: GCWorkContext<VM = VM, PlanType = PlanType>,
    >(
        plan: &'static DefragContext::PlanType,
        immix_space: &ImmixSpace<VM>,
        scheduler: &GCWorkScheduler<VM>,
    ) {
        let in_defrag = immix_space.decide_whether_to_defrag(
            plan.base().global_state.is_emergency_collection(),
            true,
            plan.base()
                .global_state
                .cur_collection_attempts
                .load(Ordering::SeqCst),
            plan.base().global_state.is_user_triggered_collection(),
            *plan.base().options.full_heap_system_gc,
        );

        if in_defrag {
            scheduler.schedule_common_work::<DefragContext>(plan);
        } else {
            scheduler.schedule_common_work::<FastContext>(plan);
        }
    }

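    /// Record whether the last GC was a defragmenting GC. Exposed to other plans in
    /// `crate::plan` that embed an Immix space.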
    pub(in crate::plan) fn set_last_gc_was_defrag(&self, defrag: bool, order: Ordering) {
        self.last_gc_was_defrag.store(defrag, order)
    }

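    /// Prepare the common plan and the Immix space for a GC. The `unlog_bits_op` argument
    /// lets callers that maintain unlog bits adjust them while the spaces are prepared;
    /// plain Immix passes `UnlogBitsOperation::NoOp` (see `prepare` above).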
    pub(in crate::plan) fn prepare_inner(
        &mut self,
        tls: VMWorkerThread,
        unlog_bits_op: UnlogBitsOperation,
    ) {
        self.common.prepare(tls, true);
        self.immix_space.prepare(
            true,
            Some(crate::policy::immix::defrag::StatsForDefrag::new(self)),
            unlog_bits_op,
        );
    }

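    /// Release the common plan and the Immix space after a GC, with the same unlog-bit
    /// handling as `prepare_inner`.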
    pub(in crate::plan) fn release_inner(
        &mut self,
        tls: VMWorkerThread,
        unlog_bits_op: UnlogBitsOperation,
    ) {
        self.common.release(tls, true);
        self.immix_space.release(true, unlog_bits_op);
    }
}