// mmtk/plan/barriers.rs

1//! Read/Write barrier implementations.
2
3use crate::vm::slot::{MemorySlice, Slot};
4use crate::vm::ObjectModel;
5use crate::{
6    util::{metadata::MetadataSpec, *},
7    vm::VMBinding,
8};
9use atomic::Ordering;
10use downcast_rs::Downcast;
11
/// BarrierSelector describes which barrier to use.
///
/// This is used as an *indicator* for each plan to enable the correct barrier.
/// For example, immix can use this selector to enable different barriers for analysis.
///
/// VM bindings may also use this to enable the correct fast-path, if the fast-path is implemented in the binding.
// `Eq` is derived alongside `PartialEq`: equality between fieldless variants is
// reflexive, and deriving only `PartialEq` trips clippy's
// `derive_partial_eq_without_eq` lint.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum BarrierSelector {
    /// No barrier is used.
    NoBarrier,
    /// Object remembering post-write barrier is used.
    ObjectBarrier,
    /// Object remembering pre-write barrier with weak reference loading barrier.
    // TODO: We might be able to generalize this to object remembering pre-write barrier.
    SATBBarrier,
}
28
29impl BarrierSelector {
30    /// A const function to check if two barrier selectors are the same.
31    pub const fn equals(&self, other: BarrierSelector) -> bool {
32        // cast enum to u8 then compare. Otherwise, we cannot do it in a const fn.
33        *self as u8 == other as u8
34    }
35}
36
/// A barrier is a combination of fast-path behaviour + slow-path semantics.
/// This trait exposes generic barrier interfaces. The implementations will define their
/// own fast-path code and slow-path semantics.
///
/// Normally, a binding will call these generic barrier interfaces (`object_reference_write` and `memory_region_copy`) for subsuming barrier calls.
///
/// If a subsuming barrier cannot be easily deployed due to platform limitations, the binding may choose to call both `object_reference_write_pre` and `object_reference_write_post`
/// barrier before and after the store operation.
///
/// As a performance optimization, the binding may also choose to port the fast-path to the VM side,
/// and call the slow-path (`object_reference_write_slow`) only if necessary.
pub trait Barrier<VM: VMBinding>: 'static + Send + Downcast {
    /// Flush thread-local states like buffers or remembered sets.
    /// No-op by default.
    fn flush(&mut self) {}

    /// Weak reference loading barrier.  A mutator should call this when loading from a weak
    /// reference field, for example, when executing `java.lang.ref.Reference.get()` in JVM, or
    /// loading from a global weak table in CRuby.
    ///
    /// Note: Merely loading from a field holding weak reference into a local variable will create a
    /// strong reference from the stack to the referent, changing its reachability from weakly
    /// reachable to strongly reachable.  Concurrent garbage collectors may need to handle such
    /// events specially.  See [SATBBarrier::load_weak_reference] for a concrete example.
    ///
    /// Arguments:
    /// *   `referent`: The referent object which the weak reference is pointing to.
    fn load_weak_reference(&mut self, _referent: ObjectReference) {}

    /// Subsuming barrier for object reference write.
    /// Runs the pre-barrier, performs the store itself, then runs the post-barrier.
    fn object_reference_write(
        &mut self,
        src: ObjectReference,
        slot: VM::VMSlot,
        target: ObjectReference,
    ) {
        self.object_reference_write_pre(src, slot, Some(target));
        slot.store(target);
        self.object_reference_write_post(src, slot, Some(target));
    }

    /// Full pre-barrier for object reference write.
    /// Called before the store.  No-op by default.
    fn object_reference_write_pre(
        &mut self,
        _src: ObjectReference,
        _slot: VM::VMSlot,
        _target: Option<ObjectReference>,
    ) {
    }

    /// Full post-barrier for object reference write.
    /// Called after the store.  No-op by default.
    fn object_reference_write_post(
        &mut self,
        _src: ObjectReference,
        _slot: VM::VMSlot,
        _target: Option<ObjectReference>,
    ) {
    }

    /// Object reference write slow-path call.
    /// This can be called either before or after the store, depending on the concrete barrier implementation.
    /// No-op by default.
    fn object_reference_write_slow(
        &mut self,
        _src: ObjectReference,
        _slot: VM::VMSlot,
        _target: Option<ObjectReference>,
    ) {
    }

    /// Subsuming barrier for array copy.
    /// Runs the pre-barrier, performs the copy itself, then runs the post-barrier.
    fn memory_region_copy(&mut self, src: VM::VMMemorySlice, dst: VM::VMMemorySlice) {
        // Slices are cloned for the pre-barrier because `MemorySlice::copy` and the
        // post-barrier still need them afterwards.
        self.memory_region_copy_pre(src.clone(), dst.clone());
        VM::VMMemorySlice::copy(&src, &dst);
        self.memory_region_copy_post(src, dst);
    }

    /// Full pre-barrier for array copy.  No-op by default.
    fn memory_region_copy_pre(&mut self, _src: VM::VMMemorySlice, _dst: VM::VMMemorySlice) {}

    /// Full post-barrier for array copy.  No-op by default.
    fn memory_region_copy_post(&mut self, _src: VM::VMMemorySlice, _dst: VM::VMMemorySlice) {}

    /// A pre-barrier indicating that some fields of the object will probably be modified soon.
    /// Specifically, the caller should ensure that:
    ///     * The barrier must be called before any field modification.
    ///     * Some fields (unknown at the time of calling this barrier) might be modified soon, without a write barrier.
    ///     * There are no safepoints between the barrier call and the field writes.
    ///
    /// **Example use case for mmtk-openjdk:**
    ///
    /// The OpenJDK C2 slowpath allocation code
    /// can do deoptimization after the allocation and before returning to C2 compiled code.
    /// The deoptimization itself contains a safepoint. For generational plans, if a GC
    /// happens at this safepoint, the allocated object will be promoted, and all the
    /// subsequent field initialization should be recorded.
    ///
    // TODO: Review any potential use cases for other VM bindings.
    fn object_probable_write(&mut self, _obj: ObjectReference) {}
}
135
// Allow downcasting a `dyn Barrier<VM>` to a concrete barrier type (via downcast-rs).
impl_downcast!(Barrier<VM> where VM: VMBinding);
137
/// Empty barrier implementation.
/// For GCs that do not need any barriers
///
/// Note that since NoBarrier does nothing but the object field write itself, it has no slow-path semantics (i.e. a no-op slow-path).
pub struct NoBarrier;

// All `Barrier` methods use their default (no-op) implementations.
impl<VM: VMBinding> Barrier<VM> for NoBarrier {}
145
/// A barrier semantics defines the barrier slow-path behaviour. For example, how an object barrier processes its modbufs.
/// Specifically, it defines the slow-path call interfaces and a call to flush buffers.
///
/// A barrier is a combination of fast-path behaviour + slow-path semantics.
/// The fast-path code will decide whether to call the slow-path calls.
pub trait BarrierSemantics: 'static + Send {
    /// The VM binding these semantics operate on.
    type VM: VMBinding;

    /// Metadata spec for the unlog bit, taken from the binding's global log bit spec.
    const UNLOG_BIT_SPEC: MetadataSpec =
        *<Self::VM as VMBinding>::VMObjectModel::GLOBAL_LOG_BIT_SPEC.as_spec();

    /// Flush thread-local buffers or remembered sets.
    /// Normally this is called by the slow-path implementation whenever the thread-local buffers are full.
    /// This will also be called externally by the VM, when the thread is being destroyed.
    fn flush(&mut self);

    /// Slow-path call for object field write operations.
    fn object_reference_write_slow(
        &mut self,
        src: ObjectReference,
        slot: <Self::VM as VMBinding>::VMSlot,
        target: Option<ObjectReference>,
    );

    /// Slow-path call for memory slice copy operations. For example, array-copy operations.
    fn memory_region_copy_slow(
        &mut self,
        src: <Self::VM as VMBinding>::VMMemorySlice,
        dst: <Self::VM as VMBinding>::VMMemorySlice,
    );

    /// Object will probably be modified.  No-op by default.
    fn object_probable_write_slow(&mut self, _obj: ObjectReference) {}

    /// Loading from a weak reference field.  No-op by default.
    fn load_weak_reference(&mut self, _o: ObjectReference) {}
}
183
/// Generic object barrier with a type argument defining its slow-path behaviour.
pub struct ObjectBarrier<S: BarrierSemantics> {
    // Slow-path semantics this barrier delegates to once the fast path decides to take the slow path.
    semantics: S,
}
188
impl<S: BarrierSemantics> ObjectBarrier<S> {
    /// Create a new ObjectBarrier with the given semantics.
    pub fn new(semantics: S) -> Self {
        Self { semantics }
    }

    /// Check whether the object's unlog bit is still set (i.e. the object has
    /// not been logged yet).  This is a non-atomic fast-path check; the atomic
    /// CAS in `log_object` makes the final decision.
    // NOTE(review): the previous doc comment here was copy-pasted from
    // `log_object` and described the wrong behavior — this method only reads
    // the bit, it does not log the object.
    fn object_is_unlogged(&self, object: ObjectReference) -> bool {
        // SAFETY: non-atomic load of the unlog bit.  Presumably a racy read is
        // acceptable on the fast path (worst case: a redundant slow-path call
        // that `log_object` then rejects) — TODO confirm against MetadataSpec::load's
        // safety contract.
        unsafe { S::UNLOG_BIT_SPEC.load::<S::VM, u8>(object, None) != 0 }
    }

    /// Attempt to atomically log an object.
    /// Returns true if the object is not logged previously.
    fn log_object(&self, object: ObjectReference) -> bool {
        // With VO-bit + extreme assertions enabled, check the object is a valid
        // allocated object before touching its metadata.
        #[cfg(all(feature = "vo_bit", feature = "extreme_assertions"))]
        debug_assert!(
            crate::util::metadata::vo_bit::is_vo_bit_set(object),
            "object bit is unset"
        );
        loop {
            let old_value =
                S::UNLOG_BIT_SPEC.load_atomic::<S::VM, u8>(object, None, Ordering::SeqCst);
            // Bit already cleared: another thread logged this object first.
            if old_value == 0 {
                return false;
            }
            // Try to clear the unlog bit (1 -> 0).  Only the thread whose CAS
            // succeeds reports the object as newly logged; on contention, retry.
            if S::UNLOG_BIT_SPEC
                .compare_exchange_metadata::<S::VM, u8>(
                    object,
                    1,
                    0,
                    None,
                    Ordering::SeqCst,
                    Ordering::SeqCst,
                )
                .is_ok()
            {
                return true;
            }
        }
    }
}
231
232impl<S: BarrierSemantics> Barrier<S::VM> for ObjectBarrier<S> {
233    fn flush(&mut self) {
234        self.semantics.flush();
235    }
236
237    fn object_reference_write_post(
238        &mut self,
239        src: ObjectReference,
240        slot: <S::VM as VMBinding>::VMSlot,
241        target: Option<ObjectReference>,
242    ) {
243        if self.object_is_unlogged(src) {
244            self.object_reference_write_slow(src, slot, target);
245        }
246    }
247
248    fn object_reference_write_slow(
249        &mut self,
250        src: ObjectReference,
251        slot: <S::VM as VMBinding>::VMSlot,
252        target: Option<ObjectReference>,
253    ) {
254        if self.log_object(src) {
255            self.semantics
256                .object_reference_write_slow(src, slot, target);
257        }
258    }
259
260    fn memory_region_copy_post(
261        &mut self,
262        src: <S::VM as VMBinding>::VMMemorySlice,
263        dst: <S::VM as VMBinding>::VMMemorySlice,
264    ) {
265        self.semantics.memory_region_copy_slow(src, dst);
266    }
267
268    fn object_probable_write(&mut self, obj: ObjectReference) {
269        if self.object_is_unlogged(obj) {
270            self.semantics.object_probable_write_slow(obj);
271        }
272    }
273}
274
/// A SATB (Snapshot-At-The-Beginning) barrier implementation.
/// This barrier is basically a pre-write object barrier with a weak reference loading barrier.
pub struct SATBBarrier<S: BarrierSemantics> {
    // Whether `load_weak_reference` forwards to the slow-path semantics.
    // Starts disabled; toggled via `set_weak_ref_barrier_enabled` (presumably
    // only during concurrent tracing — TODO confirm against callers).
    weak_ref_barrier_enabled: bool,
    // Slow-path semantics this barrier delegates to.
    semantics: S,
}
281
282impl<S: BarrierSemantics> SATBBarrier<S> {
283    /// Create a new SATBBarrier with the given semantics.
284    pub fn new(semantics: S) -> Self {
285        Self {
286            weak_ref_barrier_enabled: false,
287            semantics,
288        }
289    }
290
291    pub(crate) fn set_weak_ref_barrier_enabled(&mut self, value: bool) {
292        self.weak_ref_barrier_enabled = value;
293    }
294
295    fn object_is_unlogged(&self, object: ObjectReference) -> bool {
296        S::UNLOG_BIT_SPEC.load_atomic::<S::VM, u8>(object, None, Ordering::SeqCst) != 0
297    }
298}
299
300impl<S: BarrierSemantics> Barrier<S::VM> for SATBBarrier<S> {
301    fn flush(&mut self) {
302        self.semantics.flush();
303    }
304
305    fn load_weak_reference(&mut self, o: ObjectReference) {
306        if self.weak_ref_barrier_enabled {
307            self.semantics.load_weak_reference(o)
308        }
309    }
310
311    fn object_probable_write(&mut self, obj: ObjectReference) {
312        self.semantics.object_probable_write_slow(obj);
313    }
314
315    fn object_reference_write_pre(
316        &mut self,
317        src: ObjectReference,
318        slot: <S::VM as VMBinding>::VMSlot,
319        target: Option<ObjectReference>,
320    ) {
321        if self.object_is_unlogged(src) {
322            self.semantics
323                .object_reference_write_slow(src, slot, target);
324        }
325    }
326
327    fn object_reference_write_post(
328        &mut self,
329        _src: ObjectReference,
330        _slot: <S::VM as VMBinding>::VMSlot,
331        _target: Option<ObjectReference>,
332    ) {
333        unimplemented!()
334    }
335
336    fn object_reference_write_slow(
337        &mut self,
338        src: ObjectReference,
339        slot: <S::VM as VMBinding>::VMSlot,
340        target: Option<ObjectReference>,
341    ) {
342        self.semantics
343            .object_reference_write_slow(src, slot, target);
344    }
345
346    fn memory_region_copy_pre(
347        &mut self,
348        src: <S::VM as VMBinding>::VMMemorySlice,
349        dst: <S::VM as VMBinding>::VMMemorySlice,
350    ) {
351        self.semantics.memory_region_copy_slow(src, dst);
352    }
353
354    fn memory_region_copy_post(
355        &mut self,
356        _src: <S::VM as VMBinding>::VMMemorySlice,
357        _dst: <S::VM as VMBinding>::VMMemorySlice,
358    ) {
359        unimplemented!()
360    }
361}