mmtk/plan/concurrent/barrier.rs
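//! Snapshot-at-the-beginning (SATB) write barrier semantics for concurrent marking plans.
//! The barrier records references that a mutator is about to overwrite, as well as weak
//! referents loaded during marking, so that objects reachable when marking started are
//! not missed by the concurrent trace.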

use std::sync::atomic::Ordering;

use super::{concurrent_marking_work::ProcessModBufSATB, Pause};
use crate::plan::global::PlanTraceObject;
use crate::policy::gc_work::TraceKind;
use crate::util::VMMutatorThread;
use crate::{
    plan::{barriers::BarrierSemantics, concurrent::global::ConcurrentPlan, VectorQueue},
    scheduler::WorkBucketStage,
    util::ObjectReference,
    vm::{
        slot::{MemorySlice, Slot},
        VMBinding,
    },
    MMTK,
};

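/// Barrier semantics implementing a SATB (deletion) barrier.
///
/// `satb` buffers references that are about to be overwritten, while `refs` buffers
/// referents loaded from weak references during concurrent marking. Both buffers are
/// flushed to the scheduler as `ProcessModBufSATB` work packets.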
pub struct SATBBarrierSemantics<
    VM: VMBinding,
    P: ConcurrentPlan<VM = VM> + PlanTraceObject<VM>,
    const KIND: TraceKind,
> {
    mmtk: &'static MMTK<VM>,
    tls: VMMutatorThread,
    satb: VectorQueue<ObjectReference>,
    refs: VectorQueue<ObjectReference>,
    plan: &'static P,
}

impl<VM: VMBinding, P: ConcurrentPlan<VM = VM> + PlanTraceObject<VM>, const KIND: TraceKind>
    SATBBarrierSemantics<VM, P, KIND>
{
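    /// Create the barrier semantics for a mutator thread. Panics if the active plan is
    /// not the expected concurrent plan type `P`.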
    pub fn new(mmtk: &'static MMTK<VM>, tls: VMMutatorThread) -> Self {
        Self {
            mmtk,
            tls,
            satb: VectorQueue::default(),
            refs: VectorQueue::default(),
            plan: mmtk.get_plan().downcast_ref::<P>().unwrap(),
        }
    }

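    /// Push the about-to-be-overwritten reference `old` into the SATB buffer, flushing
    /// the buffer to the scheduler when it becomes full.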
    fn slow(&mut self, _src: Option<ObjectReference>, _slot: VM::VMSlot, old: ObjectReference) {
        self.satb.push(old);
        if self.satb.is_full() {
            self.flush_satb();
        }
    }

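    /// Record the object currently referenced by `slot`, if any, before the slot is
    /// updated. Always returns `true`.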
    fn enqueue_node(
        &mut self,
        src: Option<ObjectReference>,
        slot: VM::VMSlot,
        _new: Option<ObjectReference>,
    ) -> bool {
        if let Some(old) = slot.load() {
            self.slow(src, slot, old);
        }
        true
    }

    /// Log an object by clearing its unlog bit.
    ///
    /// Note: the current implementation clears the bit unconditionally and always returns
    /// `true`, rather than checking whether the object was previously unlogged.
    fn log_object(&self, object: ObjectReference) -> bool {
        Self::UNLOG_BIT_SPEC.store_atomic::<VM, u8>(object, 0, None, Ordering::SeqCst);
        true
    }

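    /// Flush the SATB buffer as a `ProcessModBufSATB` work packet. The packet goes to the
    /// `Concurrent` bucket while concurrent marking is in progress, and to the `Closure`
    /// bucket otherwise (i.e. during the final-mark pause). If no SATB packets are needed,
    /// the buffered entries are discarded.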
    fn flush_satb(&mut self) {
        if !self.satb.is_empty() {
            if self.should_create_satb_packets() {
                let satb = self.satb.take();
                let bucket = if self.plan.concurrent_work_in_progress() {
                    WorkBucketStage::Concurrent
                } else {
                    debug_assert_ne!(self.plan.current_pause(), Some(Pause::InitialMark));
                    WorkBucketStage::Closure
                };
                self.mmtk.scheduler.work_buckets[bucket]
                    .add(ProcessModBufSATB::<VM, P, KIND>::new(satb));
            } else {
                let _ = self.satb.take();
            }
        }
    }

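    /// Flush buffered weak referents as a `ProcessModBufSATB` work packet, using the same
    /// bucket selection as `flush_satb`.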
    #[cold]
    fn flush_weak_refs(&mut self) {
        if !self.refs.is_empty() {
            let nodes = self.refs.take();
            let bucket = if self.plan.concurrent_work_in_progress() {
                WorkBucketStage::Concurrent
            } else {
                debug_assert_ne!(self.plan.current_pause(), Some(Pause::InitialMark));
                WorkBucketStage::Closure
            };
            self.mmtk.scheduler.work_buckets[bucket]
                .add(ProcessModBufSATB::<VM, P, KIND>::new(nodes));
        }
    }

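    /// SATB packets are only needed while concurrent marking is in progress or during the
    /// `FinalMark` pause; outside those windows, buffered entries can be dropped.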
    fn should_create_satb_packets(&self) -> bool {
        self.plan.concurrent_work_in_progress()
            || self.plan.current_pause() == Some(Pause::FinalMark)
    }
}

impl<VM: VMBinding, P: ConcurrentPlan<VM = VM> + PlanTraceObject<VM>, const KIND: TraceKind>
    BarrierSemantics for SATBBarrierSemantics<VM, P, KIND>
{
    type VM = VM;

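    /// Flush both the SATB buffer and the weak-reference buffer.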
    #[cold]
    fn flush(&mut self) {
        self.flush_satb();
        self.flush_weak_refs();
    }

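    /// Slow path of the reference write barrier: conservatively enqueue the current targets
    /// of all of `src`'s fields, then mark `src` as logged.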
    fn object_reference_write_slow(
        &mut self,
        src: ObjectReference,
        _slot: <Self::VM as VMBinding>::VMSlot,
        _target: Option<ObjectReference>,
    ) {
        self.object_probable_write_slow(src);
        self.log_object(src);
    }

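    /// Enqueue the objects currently referenced by every destination slot before they are
    /// overwritten by the bulk copy.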
    fn memory_region_copy_slow(
        &mut self,
        _src: <Self::VM as VMBinding>::VMMemorySlice,
        dst: <Self::VM as VMBinding>::VMMemorySlice,
    ) {
        for s in dst.iter_slots() {
            self.enqueue_node(None, s, None);
        }
    }

    /// Enqueue the referent during concurrent marking.
    ///
    /// Note: During concurrent marking, a collector based on snapshot-at-the-beginning (SATB) will
    /// not reach objects that were weakly reachable at the time of `InitialMark`. But if a mutator
    /// loads from a weak reference field during concurrent marking, it will make the referent
    /// strongly reachable, yet the referent is still not part of the SATB. We must conservatively
    /// enqueue the referent even though its reachability has not yet been established; otherwise,
    /// it (and its children) may be treated as garbage if it happened to be weakly reachable at the
    /// time of `InitialMark`.
    fn load_weak_reference(&mut self, o: ObjectReference) {
        if !self.plan.concurrent_work_in_progress() {
            return;
        }
        self.refs.push(o);
        if self.refs.is_full() {
            self.flush_weak_refs();
        }
    }

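    /// Treat the whole object as potentially modified: enqueue the current target of every
    /// field of `obj`.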
    fn object_probable_write_slow(&mut self, obj: ObjectReference) {
        crate::plan::tracing::SlotIterator::<VM>::iterate_fields(obj, self.tls.0, |s| {
            self.enqueue_node(Some(obj), s, None);
        });
    }
}