// mmtk/plan/concurrent/immix/global.rs
use crate::plan::concurrent::concurrent_marking_work::ProcessRootSlots;
2use crate::plan::concurrent::global::ConcurrentPlan;
3use crate::plan::concurrent::immix::gc_work::ConcurrentImmixGCWorkContext;
4use crate::plan::concurrent::immix::gc_work::ConcurrentImmixSTWGCWorkContext;
5use crate::plan::concurrent::Pause;
6use crate::plan::global::BasePlan;
7use crate::plan::global::CommonPlan;
8use crate::plan::global::CreateGeneralPlanArgs;
9use crate::plan::global::CreateSpecificPlanArgs;
10use crate::plan::immix::mutator::ALLOCATOR_MAPPING;
11use crate::plan::AllocationSemantics;
12use crate::plan::Plan;
13use crate::plan::PlanConstraints;
14use crate::policy::immix::defrag::StatsForDefrag;
15use crate::policy::immix::ImmixSpaceArgs;
16use crate::policy::immix::TRACE_KIND_DEFRAG;
17use crate::policy::immix::TRACE_KIND_FAST;
18use crate::policy::space::Space;
19use crate::scheduler::gc_work::Release;
20use crate::scheduler::gc_work::StopMutators;
21use crate::scheduler::gc_work::UnsupportedProcessEdges;
22use crate::scheduler::gc_work::VMProcessWeakRefs;
23use crate::scheduler::*;
24use crate::util::alloc::allocators::AllocatorSelector;
25use crate::util::copy::*;
26use crate::util::heap::gc_trigger::SpaceStats;
27use crate::util::heap::VMRequest;
28use crate::util::metadata::log_bit::UnlogBitsOperation;
29use crate::util::metadata::side_metadata::SideMetadataContext;
30use crate::vm::ObjectModel;
31use crate::vm::VMBinding;
32use crate::{policy::immix::ImmixSpace, util::opaque_pointer::VMWorkerThread};
33use std::sync::atomic::AtomicBool;
34
35use atomic::Atomic;
36use atomic::Ordering;
37use enum_map::EnumMap;
38
39use mmtk_macros::{HasSpaces, PlanTraceObject};
40
/// An Immix plan with support for concurrent marking.
///
/// Collections run as one of three pause kinds (see [`Pause`]): a full
/// stop-the-world GC, or a concurrent-marking cycle bracketed by an
/// `InitialMark` and a `FinalMark` pause, with marking work executed in the
/// `Concurrent` work bucket between the two pauses.
#[derive(HasSpaces, PlanTraceObject)]
pub struct ConcurrentImmix<VM: VMBinding> {
    /// The Immix space. Objects may be evacuated during defrag GCs
    /// (`CopySemantics::DefaultCopy`).
    #[post_scan]
    #[space]
    #[copy_semantics(CopySemantics::DefaultCopy)]
    pub immix_space: ImmixSpace<VM>,
    /// The common plan, providing the non-Immix spaces shared by all plans.
    #[parent]
    pub common: CommonPlan<VM>,
    // Whether the last GC released blocks via defragmentation; feeds
    // `last_collection_was_exhaustive`.
    last_gc_was_defrag: AtomicBool,
    // The pause currently being executed; `None` outside a pause.
    // Set in `schedule_collection`, cleared in `end_of_gc`.
    current_pause: Atomic<Option<Pause>>,
    // The pause executed by the previous collection, for sanity checks.
    previous_pause: Atomic<Option<Pause>>,
    // Request flag: the next collection should be a full STW GC rather than
    // the start of a concurrent-marking cycle.
    should_do_full_gc: AtomicBool,
    // True between the end of an `InitialMark` pause and the start of the
    // matching `FinalMark` (or a `Full`) pause, i.e. while mutators run
    // alongside concurrent marking.
    concurrent_marking_active: AtomicBool,
}
57
/// Plan constraints for [`ConcurrentImmix`].
///
/// The plan uses the SATB write barrier and the global log bit to support
/// concurrent marking, and moves objects unless the `immix_non_moving`
/// feature is enabled.
pub const CONCURRENT_IMMIX_CONSTRAINTS: PlanConstraints = PlanConstraints {
    moves_objects: !cfg!(feature = "immix_non_moving"),
    max_non_los_default_alloc_bytes: crate::policy::immix::MAX_IMMIX_OBJECT_SIZE,
    needs_prepare_mutator: true,
    barrier: crate::BarrierSelector::SATBBarrier,
    needs_log_bit: true,
    ..PlanConstraints::default()
};
69
impl<VM: VMBinding> Plan for ConcurrentImmix<VM> {
    /// Decide whether a collection should be triggered. Three triggers, in
    /// priority order:
    /// 1. The base plan's heuristics demand a GC -> request a full STW GC.
    /// 2. Concurrent marking is in progress and the `Concurrent` bucket is
    ///    drained -> trigger a pause (which will be `FinalMark`).
    /// 3. Concurrent marking is enabled, not already running, and pages
    ///    allocated since the last GC exceed half the total heap -> trigger a
    ///    pause (which will be `InitialMark`).
    fn collection_required(&self, space_full: bool, _space: Option<SpaceStats<Self::VM>>) -> bool {
        if self.base().collection_required(self, space_full) {
            // NOTE(review): this store uses `Release` while every other
            // access to `should_do_full_gc` uses `SeqCst` — confirm the
            // weaker ordering is intentional.
            self.should_do_full_gc.store(true, Ordering::Release);
            info!("Triggering full GC");
            return true;
        }

        let concurrent_marking_in_progress = self.concurrent_marking_in_progress();
        // Concurrent marking has finished all its work packets: pause the
        // mutators to run the FinalMark pause.
        if concurrent_marking_in_progress
            && self.common.base.scheduler.work_buckets[WorkBucketStage::Concurrent].is_drained()
        {
            return true;
        }

        // With concurrent marking disabled, only the base-plan trigger above
        // can start a GC.
        if self.concurrent_marking_is_disabled() {
            return false;
        }

        // Start concurrent marking once allocation since the last GC passes
        // half of the total heap pages.
        let threshold = self.get_total_pages() >> 1;
        let used_pages_after_last_gc = self.common.base.global_state.get_used_pages_after_last_gc();
        let used_pages_now = self.get_used_pages();
        let allocated = used_pages_now.saturating_sub(used_pages_after_last_gc);
        if !concurrent_marking_in_progress && allocated > threshold {
            info!("Allocated {allocated} pages since last GC ({used_pages_now} - {used_pages_after_last_gc} > {threshold}): Do concurrent marking");
            debug_assert!(
                self.common.base.scheduler.work_buckets[WorkBucketStage::Concurrent].is_empty()
            );
            debug_assert!(!self.concurrent_marking_in_progress());
            debug_assert_ne!(self.previous_pause(), Some(Pause::InitialMark));
            return true;
        }

        false
    }

    /// A GC was exhaustive if the Immix space considers the last GC
    /// exhaustive, taking into account whether it defragmented.
    fn last_collection_was_exhaustive(&self) -> bool {
        self.immix_space
            .is_last_gc_exhaustive(self.last_gc_was_defrag.load(Ordering::Relaxed))
    }

    fn constraints(&self) -> &'static PlanConstraints {
        &CONCURRENT_IMMIX_CONSTRAINTS
    }

    /// All copying (defrag evacuation) goes to the Immix space itself.
    fn create_copy_config(&'static self) -> CopyConfig<Self::VM> {
        use enum_map::enum_map;
        CopyConfig {
            copy_mapping: enum_map! {
                CopySemantics::DefaultCopy => CopySelector::Immix(0),
                _ => CopySelector::Unused,
            },
            space_mapping: vec![(CopySelector::Immix(0), &self.immix_space)],
            constraints: &CONCURRENT_IMMIX_CONSTRAINTS,
        }
    }

    /// Determine which pause to run and schedule its work packets.
    ///
    /// Pause selection: `FinalMark` if concurrent marking is in progress,
    /// otherwise `Full` if a full GC was requested (or concurrent marking is
    /// disabled), otherwise `InitialMark` to start a new marking cycle.
    fn schedule_collection(&'static self, scheduler: &GCWorkScheduler<VM>) {
        if self.concurrent_marking_is_disabled() {
            // Without concurrent marking, every GC is a full STW GC.
            self.should_do_full_gc.store(true, Ordering::SeqCst);
        }

        let pause = if self.concurrent_marking_in_progress() {
            Pause::FinalMark
        } else if self.should_do_full_gc.load(Ordering::SeqCst) {
            Pause::Full
        } else {
            Pause::InitialMark
        };

        self.current_pause.store(Some(pause), Ordering::SeqCst);

        // Tracepoint for external observability of the chosen pause.
        probe!(mmtk, concurrent_pause_determined, pause as usize);

        match pause {
            Pause::Full => {
                // A full GC processes references in the same pause, so the
                // ref-closure buckets must be enabled.
                self.set_ref_closure_buckets_enabled(true);
                crate::plan::immix::global::Immix::schedule_immix_full_heap_collection::<
                    ConcurrentImmix<VM>,
                    ConcurrentImmixSTWGCWorkContext<VM, TRACE_KIND_FAST>,
                    ConcurrentImmixSTWGCWorkContext<VM, TRACE_KIND_DEFRAG>,
                >(self, &self.immix_space, scheduler);
            }
            Pause::InitialMark => self.schedule_concurrent_marking_initial_pause(scheduler),
            Pause::FinalMark => self.schedule_concurrent_marking_final_pause(scheduler),
        }
    }

    fn get_allocator_mapping(&self) -> &'static EnumMap<AllocationSemantics, AllocatorSelector> {
        &ALLOCATOR_MAPPING
    }

    /// Plan-level prepare. The work (and the log-bit operation passed to the
    /// Immix space) depends on the pause kind chosen in `schedule_collection`.
    fn prepare(&mut self, tls: VMWorkerThread) {
        let pause = self.current_pause().unwrap();
        match pause {
            Pause::Full => {
                self.common.prepare(tls, true);
                self.immix_space.prepare(
                    true,
                    Some(StatsForDefrag::new(self)),
                    // No bulk log-bit update for a full STW GC.
                    UnlogBitsOperation::NoOp,
                );
            }
            Pause::InitialMark => {
                // Bulk-set the unlog bits so the SATB barrier records
                // modifications made while concurrent marking runs.
                self.immix_space.prepare(
                    true,
                    Some(StatsForDefrag::new(self)),
                    UnlogBitsOperation::BulkSet,
                );

                self.common.prepare(tls, true);
                // Also set unlog bits for the common-plan spaces.
                self.common
                    .schedule_unlog_bits_op(UnlogBitsOperation::BulkSet);
            }
            // FinalMark continues the marking begun at InitialMark; spaces
            // were already prepared then.
            Pause::FinalMark => (),
        }
    }

    /// Plan-level release. InitialMark releases nothing — the cycle is still
    /// in progress; Full and FinalMark release the spaces and clear log bits.
    fn release(&mut self, tls: VMWorkerThread) {
        let pause = self.current_pause().unwrap();
        match pause {
            Pause::InitialMark => (),
            Pause::Full | Pause::FinalMark => {
                self.immix_space.release(
                    true,
                    UnlogBitsOperation::BulkClear,
                );

                self.common.release(tls, true);

                if pause == Pause::FinalMark {
                    // Clear the unlog bits set at InitialMark for the
                    // common-plan spaces; the SATB barrier is no longer needed.
                    self.common
                        .schedule_unlog_bits_op(UnlogBitsOperation::BulkClear);
                } else {
                    // Intentionally empty for Pause::Full (the original
                    // numbering suggests code was removed here — see VCS
                    // history).
                }
            }
        }
    }

    /// Post-GC bookkeeping: record defrag status, activate concurrent marking
    /// after an InitialMark pause, and rotate the pause state.
    fn end_of_gc(&mut self, _tls: VMWorkerThread) {
        self.last_gc_was_defrag
            .store(self.immix_space.end_of_gc(), Ordering::Relaxed);

        let pause = self.current_pause().unwrap();
        if pause == Pause::InitialMark {
            // From here until FinalMark, mutators run concurrently with
            // marking and new objects are allocated as live.
            self.set_concurrent_marking_state(true);
        }
        self.previous_pause.store(Some(pause), Ordering::SeqCst);
        self.current_pause.store(None, Ordering::SeqCst);
        if pause != Pause::FinalMark {
            self.should_do_full_gc.store(false, Ordering::SeqCst);
        } else {
            // FinalMark keeps `should_do_full_gc` untouched; a full-GC
            // request raised during marking stays pending.
        }
        info!("{:?} end", pause);
    }

    /// Only a defragmenting GC moves objects in this plan.
    fn current_gc_may_move_object(&self) -> bool {
        self.immix_space.in_defrag()
    }

    fn get_collection_reserved_pages(&self) -> usize {
        self.immix_space.defrag_headroom_pages()
    }

    fn get_used_pages(&self) -> usize {
        self.immix_space.reserved_pages() + self.common.get_used_pages()
    }

    fn base(&self) -> &BasePlan<VM> {
        &self.common.base
    }

    fn base_mut(&mut self) -> &mut BasePlan<Self::VM> {
        &mut self.common.base
    }

    fn common(&self) -> &CommonPlan<VM> {
        &self.common
    }

    /// Called once all mutators are stopped, before GC work runs.
    fn notify_mutators_paused(&self, _scheduler: &GCWorkScheduler<VM>) {
        use crate::vm::ActivePlan;
        let pause = self.current_pause().unwrap();
        match pause {
            Pause::Full => {
                // A full GC supersedes any in-flight marking cycle.
                self.set_concurrent_marking_state(false);
            }
            Pause::InitialMark => {
                debug_assert!(
                    !self.concurrent_marking_in_progress(),
                    "prev pause: {:?}",
                    self.previous_pause().unwrap()
                );
            }
            Pause::FinalMark => {
                debug_assert!(self.concurrent_marking_in_progress());
                // Flush each mutator's SATB barrier buffer so all recorded
                // objects are traced during this pause.
                for mutator in <VM as VMBinding>::VMActivePlan::mutators() {
                    mutator.barrier.flush();
                }
                self.set_concurrent_marking_state(false);
            }
        }
        info!("{:?} start", pause);
    }

    fn concurrent(&self) -> Option<&dyn ConcurrentPlan<VM = VM>> {
        Some(self)
    }
}
311
impl<VM: VMBinding> ConcurrentImmix<VM> {
    /// Create a `ConcurrentImmix` plan.
    ///
    /// Registers the global log bit as a side metadata spec (required by the
    /// SATB barrier), disables the scheduler buckets this plan never uses,
    /// and creates the Immix space and common plan.
    pub fn new(args: CreateGeneralPlanArgs<VM>) -> Self {
        if *args.options.concurrent_immix_disable_concurrent_marking {
            warn!("Option 'concurrent_immix_disable_concurrent_marking' is set to true. Concurrent marking is disabled for ConcurrentImmix. This will make ConcurrentImmix behave exactly like full heap Immix.");
        }

        // The log bit may be kept on the side (rather than in the header);
        // extract it so it is allocated as global side metadata.
        let spec = crate::util::metadata::extract_side_metadata(&[
            *VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC,
        ]);

        let mut plan_args = CreateSpecificPlanArgs {
            global_args: args,
            constraints: &CONCURRENT_IMMIX_CONSTRAINTS,
            global_side_metadata_specs: SideMetadataContext::new_global_specs(&spec),
        };

        let immix_args = ImmixSpaceArgs {
            mixed_age: false,
            never_move_objects: false,
        };

        // These stages are never used by this plan (no separate forwarding
        // or compaction phases), so disable their buckets up front.
        let scheduler = &plan_args.global_args.scheduler;
        scheduler.work_buckets[WorkBucketStage::VMRefForwarding].set_enabled(false);
        scheduler.work_buckets[WorkBucketStage::CalculateForwarding].set_enabled(false);
        scheduler.work_buckets[WorkBucketStage::SecondRoots].set_enabled(false);
        scheduler.work_buckets[WorkBucketStage::RefForwarding].set_enabled(false);
        scheduler.work_buckets[WorkBucketStage::FinalizableForwarding].set_enabled(false);
        scheduler.work_buckets[WorkBucketStage::Compact].set_enabled(false);

        let immix = ConcurrentImmix {
            immix_space: ImmixSpace::new(
                plan_args.get_normal_space_args("immix", true, false, VMRequest::discontiguous()),
                immix_args,
            ),
            common: CommonPlan::new(plan_args),
            last_gc_was_defrag: AtomicBool::new(false),
            current_pause: Atomic::new(None),
            previous_pause: Atomic::new(None),
            should_do_full_gc: AtomicBool::new(false),
            concurrent_marking_active: AtomicBool::new(false),
        };

        immix.verify_side_metadata_sanity();

        immix
    }

    /// Enable or disable all reference-closure buckets. They are disabled for
    /// an InitialMark pause (reference processing happens at FinalMark) and
    /// enabled for Full and FinalMark pauses.
    fn set_ref_closure_buckets_enabled(&self, do_closure: bool) {
        let scheduler = &self.common.base.scheduler;
        scheduler.work_buckets[WorkBucketStage::VMRefClosure].set_enabled(do_closure);
        scheduler.work_buckets[WorkBucketStage::WeakRefClosure].set_enabled(do_closure);
        scheduler.work_buckets[WorkBucketStage::FinalRefClosure].set_enabled(do_closure);
        scheduler.work_buckets[WorkBucketStage::SoftRefClosure].set_enabled(do_closure);
        scheduler.work_buckets[WorkBucketStage::PhantomRefClosure].set_enabled(do_closure);
    }

    /// Schedule the work for an `InitialMark` pause: stop mutators, scan
    /// roots (`ProcessRootSlots` seeds the concurrent marking work), and run
    /// plan/space prepare. No release work — the GC cycle continues
    /// concurrently after this pause.
    pub(crate) fn schedule_concurrent_marking_initial_pause(
        &'static self,
        scheduler: &GCWorkScheduler<VM>,
    ) {
        use crate::scheduler::gc_work::Prepare;

        self.set_ref_closure_buckets_enabled(false);

        scheduler.work_buckets[WorkBucketStage::Unconstrained].add(StopMutators::<
            ConcurrentImmixGCWorkContext<ProcessRootSlots<VM, Self, TRACE_KIND_FAST>>,
        >::new());
        scheduler.work_buckets[WorkBucketStage::Prepare].add(Prepare::<
            ConcurrentImmixGCWorkContext<UnsupportedProcessEdges<VM>>,
        >::new(self));
    }

    /// Schedule the work for a `FinalMark` pause: stop mutators (without
    /// re-scanning roots — marking already ran from the InitialMark root
    /// set), process weak/soft/phantom references and finalizers, and release.
    fn schedule_concurrent_marking_final_pause(&'static self, scheduler: &GCWorkScheduler<VM>) {
        self.set_ref_closure_buckets_enabled(true);

        scheduler.work_buckets[WorkBucketStage::Unconstrained].add(StopMutators::<
            ConcurrentImmixGCWorkContext<ProcessRootSlots<VM, Self, TRACE_KIND_FAST>>,
        >::new_no_scan_roots());

        scheduler.work_buckets[WorkBucketStage::Release].add(Release::<
            ConcurrentImmixGCWorkContext<UnsupportedProcessEdges<VM>>,
        >::new(self));

        // Reference/finalizer tracing during this pause uses the regular
        // (non-concurrent) trace with the fast trace kind.
        type RefProcessingEdges<VM> =
            crate::scheduler::gc_work::PlanProcessEdges<VM, ConcurrentImmix<VM>, TRACE_KIND_FAST>;
        if !*self.base().options.no_reference_types {
            use crate::util::reference_processor::{
                PhantomRefProcessing, SoftRefProcessing, WeakRefProcessing,
            };
            scheduler.work_buckets[WorkBucketStage::SoftRefClosure]
                .add(SoftRefProcessing::<RefProcessingEdges<VM>>::new());
            scheduler.work_buckets[WorkBucketStage::WeakRefClosure]
                .add(WeakRefProcessing::<VM>::new());
            scheduler.work_buckets[WorkBucketStage::PhantomRefClosure]
                .add(PhantomRefProcessing::<VM>::new());

            use crate::util::reference_processor::RefEnqueue;
            scheduler.work_buckets[WorkBucketStage::Release].add(RefEnqueue::<VM>::new());
        }

        if !*self.base().options.no_finalizer {
            use crate::util::finalizable_processor::Finalization;
            scheduler.work_buckets[WorkBucketStage::FinalRefClosure]
                .add(Finalization::<RefProcessingEdges<VM>>::new());
        }

        // VM-side weak reference processing runs as the bucket sentinel, i.e.
        // after all other packets in the VMRefClosure bucket are done.
        scheduler.work_buckets[WorkBucketStage::VMRefClosure]
            .set_sentinel(Box::new(VMProcessWeakRefs::<RefProcessingEdges<VM>>::new()));
    }

    /// Whether a concurrent-marking cycle is currently active (between the
    /// end of InitialMark and the start of FinalMark/Full).
    pub fn concurrent_marking_in_progress(&self) -> bool {
        self.concurrent_marking_active.load(Ordering::Acquire)
    }

    /// Flip the concurrent-marking flag and, in all spaces, whether new
    /// objects are allocated as live (they must be while marking runs
    /// concurrently, so they are not reclaimed before being traced).
    fn set_concurrent_marking_state(&self, active: bool) {
        use crate::plan::global::HasSpaces;

        let allocate_object_as_live = active;
        self.for_each_space(&mut |space: &dyn Space<VM>| {
            space.set_allocate_as_live(allocate_object_as_live);
        });

        // Publish the flag after the spaces are updated.
        self.concurrent_marking_active
            .store(active, Ordering::SeqCst);

    }

    pub(super) fn is_concurrent_marking_active(&self) -> bool {
        self.concurrent_marking_active.load(Ordering::SeqCst)
    }

    fn previous_pause(&self) -> Option<Pause> {
        self.previous_pause.load(Ordering::SeqCst)
    }

    /// Whether the user disabled concurrent marking via the
    /// `concurrent_immix_disable_concurrent_marking` option.
    fn concurrent_marking_is_disabled(&self) -> bool {
        *self
            .base()
            .options
            .concurrent_immix_disable_concurrent_marking
    }
}
468
469impl<VM: VMBinding> ConcurrentPlan for ConcurrentImmix<VM> {
470 fn current_pause(&self) -> Option<Pause> {
471 self.current_pause.load(Ordering::SeqCst)
472 }
473
474 fn concurrent_work_in_progress(&self) -> bool {
475 self.concurrent_marking_in_progress()
476 }
477}