//! GC work packets for the mark-compact plan (mmtk/plan/markcompact/gc_work.rs).
use super::global::MarkCompact;
2use crate::policy::markcompactspace::MarkCompactSpace;
3use crate::policy::markcompactspace::{TRACE_KIND_FORWARD, TRACE_KIND_MARK};
4use crate::scheduler::gc_work::PlanProcessEdges;
5use crate::scheduler::gc_work::*;
6use crate::scheduler::GCWork;
7use crate::scheduler::GCWorker;
8use crate::scheduler::WorkBucketStage;
9use crate::vm::ActivePlan;
10use crate::vm::Scanning;
11use crate::vm::VMBinding;
12use crate::MMTK;
13use std::marker::PhantomData;
14
15pub struct CalculateForwardingAddress<VM: VMBinding> {
17 mc_space: &'static MarkCompactSpace<VM>,
18}
19
20impl<VM: VMBinding> GCWork<VM> for CalculateForwardingAddress<VM> {
21 fn do_work(&mut self, _worker: &mut GCWorker<VM>, _mmtk: &'static MMTK<VM>) {
22 self.mc_space.calculate_forwarding_pointer();
23 }
24}
25
26impl<VM: VMBinding> CalculateForwardingAddress<VM> {
27 pub fn new(mc_space: &'static MarkCompactSpace<VM>) -> Self {
28 Self { mc_space }
29 }
30}
31
/// Work packet that runs between the marking and forwarding phases of a
/// mark-compact GC. In `do_work` it flips the plan's release/prepare
/// state and schedules a second round of root scanning so slots can be
/// rewritten to the forwarding addresses computed earlier.
pub struct UpdateReferences<VM: VMBinding> {
    // Raw pointer to the owning plan; dereferenced mutably in `do_work`.
    // Stored as a pointer (rather than a reference) to sidestep borrow
    // lifetimes on the packet itself.
    plan: *const MarkCompact<VM>,
    // Ties the packet to a specific VM binding without storing a value.
    p: PhantomData<VM>,
}

// SAFETY(review): required because the raw `plan` pointer is not `Send`
// by default. Soundness presumably rests on the plan living for the
// whole MMTk instance and on the scheduler not running this packet
// concurrently with other mutation of the plan — confirm against the
// scheduler's work-bucket ordering.
unsafe impl<VM: VMBinding> Send for UpdateReferences<VM> {}
40
impl<VM: VMBinding> GCWork<VM> for UpdateReferences<VM> {
    fn do_work(&mut self, worker: &mut GCWorker<VM>, mmtk: &'static MMTK<VM>) {
        // Let the binding and the MMTk state machine reset whatever they
        // need before roots are scanned a second time within this GC.
        VM::VMScanning::prepare_for_roots_re_scanning();
        mmtk.state.prepare_for_stack_scanning();
        // SAFETY(review): casts away const to obtain `&mut` to the plan.
        // This assumes no other thread accesses the plan while this packet
        // runs — presumably guaranteed by bucket sequencing; confirm.
        let plan_mut = unsafe { &mut *(self.plan as *mut MarkCompact<VM>) };
        // Tear down the state of the phase that just finished, then
        // prepare for the forwarding phase; release precedes prepare.
        plan_mut.common.release(worker.tls, true);
        plan_mut.common.prepare(worker.tls, true);
        // Under extreme_assertions, clear the slot log so the forwarding
        // trace starts with a clean record.
        #[cfg(feature = "extreme_assertions")]
        mmtk.slot_logger.reset();

        // Drop the live-bytes tallies accumulated so far; the forwarding
        // trace will accumulate its own.
        mmtk.scheduler
            .worker_group
            .get_and_clear_worker_live_bytes();

        // Re-scan every mutator's roots in the SecondRoots bucket, this
        // time using the forwarding work context so each slot is updated
        // to the object's new address.
        for mutator in VM::VMActivePlan::mutators() {
            mmtk.scheduler.work_buckets[WorkBucketStage::SecondRoots].add(ScanMutatorRoots::<
                MarkCompactForwardingGCWorkContext<VM>,
            >(mutator));
        }

        // VM-specific roots get the same forwarding treatment.
        mmtk.scheduler.work_buckets[WorkBucketStage::SecondRoots]
            .add(ScanVMSpecificRoots::<MarkCompactForwardingGCWorkContext<VM>>::new());
    }
}
68
69impl<VM: VMBinding> UpdateReferences<VM> {
70 pub fn new(plan: &MarkCompact<VM>) -> Self {
71 Self {
72 plan,
73 p: PhantomData,
74 }
75 }
76}
77
78pub struct Compact<VM: VMBinding> {
80 mc_space: &'static MarkCompactSpace<VM>,
81}
82
83impl<VM: VMBinding> GCWork<VM> for Compact<VM> {
84 fn do_work(&mut self, _worker: &mut GCWorker<VM>, _mmtk: &'static MMTK<VM>) {
85 self.mc_space.compact();
86 }
87}
88
89impl<VM: VMBinding> Compact<VM> {
90 pub fn new(mc_space: &'static MarkCompactSpace<VM>) -> Self {
91 Self { mc_space }
92 }
93}
94
/// Slot-processing work for the first transitive closure (marking).
pub type MarkingProcessEdges<VM> = PlanProcessEdges<VM, MarkCompact<VM>, TRACE_KIND_MARK>;
/// Slot-processing work for the second transitive closure (forwarding).
pub type ForwardingProcessEdges<VM> = PlanProcessEdges<VM, MarkCompact<VM>, TRACE_KIND_FORWARD>;
99
/// Work context for the first stage of a mark-compact GC, in which the
/// default tracing uses `TRACE_KIND_MARK` edges.
pub struct MarkCompactGCWorkContext<VM: VMBinding>(std::marker::PhantomData<VM>);
impl<VM: VMBinding> crate::scheduler::GCWorkContext for MarkCompactGCWorkContext<VM> {
    type VM = VM;
    type PlanType = MarkCompact<VM>;
    type DefaultProcessEdges = MarkingProcessEdges<VM>;
    // Pinning traces are not supported by this plan.
    type PinningProcessEdges = UnsupportedProcessEdges<VM>;
}
107
/// Work context for the second stage of a mark-compact GC, in which the
/// default tracing uses `TRACE_KIND_FORWARD` edges to update references.
pub struct MarkCompactForwardingGCWorkContext<VM: VMBinding>(std::marker::PhantomData<VM>);
impl<VM: VMBinding> crate::scheduler::GCWorkContext for MarkCompactForwardingGCWorkContext<VM> {
    type VM = VM;
    type PlanType = MarkCompact<VM>;
    type DefaultProcessEdges = ForwardingProcessEdges<VM>;
    // Pinning traces are not supported by this plan.
    type PinningProcessEdges = UnsupportedProcessEdges<VM>;
}
114}