mmtk/plan/
tracing.rs

//! This module contains code useful for tracing,
//! i.e. visiting the reachable objects by traversing all or part of an object graph.

use std::marker::PhantomData;

use crate::scheduler::gc_work::{ProcessEdgesWork, SlotOf};
use crate::scheduler::{GCWorker, WorkBucketStage, EDGES_WORK_BUFFER_SIZE};
use crate::util::{ObjectReference, VMThread, VMWorkerThread};
use crate::vm::{Scanning, SlotVisitor, VMBinding};

/// This trait represents an object queue to enqueue objects during tracing.
pub trait ObjectQueue {
    /// Enqueue an object into the queue.
    fn enqueue(&mut self, object: ObjectReference);
}
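
// Illustrative sketch (not part of the original module): any type that records
// objects reached during tracing can implement `ObjectQueue`. The hypothetical
// queue below, compiled only for tests, simply counts the enqueued objects.
#[cfg(test)]
#[allow(dead_code)]
struct CountingQueue {
    count: usize,
}

#[cfg(test)]
#[allow(dead_code)]
impl ObjectQueue for CountingQueue {
    fn enqueue(&mut self, _object: ObjectReference) {
        // A real queue would buffer the object for later scanning;
        // here we only record that it was reached.
        self.count += 1;
    }
}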

/// A vector queue for object references.
pub type VectorObjectQueue = VectorQueue<ObjectReference>;

/// An implementation of `ObjectQueue` using a `Vec`.
///
/// This can also be used as a buffer. For example, the mark stack or the write barrier mod-buffer.
pub struct VectorQueue<T> {
    /// Enqueued nodes.
    buffer: Vec<T>,
}

impl<T> VectorQueue<T> {
    /// The capacity to reserve on the first enqueue, to avoid frequent resizing.
    const CAPACITY: usize = EDGES_WORK_BUFFER_SIZE;

    /// Create an empty `VectorQueue`.
    pub fn new() -> Self {
        Self { buffer: Vec::new() }
    }

    /// Return `true` if the queue is empty.
    pub fn is_empty(&self) -> bool {
        self.buffer.is_empty()
    }

    /// Return the contents of the underlying vector, leaving the queue empty.
    pub fn take(&mut self) -> Vec<T> {
        std::mem::take(&mut self.buffer)
    }

    /// Consume this `VectorQueue` and return its underlying vector.
    pub fn into_vec(self) -> Vec<T> {
        self.buffer
    }

    /// Return `true` if the buffer length has reached `CAPACITY`.
    pub fn is_full(&self) -> bool {
        self.buffer.len() >= Self::CAPACITY
    }

    /// Push an element to the queue. On the first push, this reserves space for
    /// `CAPACITY` elements. The caller should check `is_full` and flush the queue
    /// before its length exceeds `CAPACITY`; this method does not check the length
    /// against the capacity, and pushing beyond it will allocate more space.
    pub fn push(&mut self, v: T) {
        if self.buffer.is_empty() {
            self.buffer.reserve(Self::CAPACITY);
        }
        self.buffer.push(v);
    }

    /// Return the length of the queue.
    pub fn len(&self) -> usize {
        self.buffer.len()
    }

    /// Empty the queue.
    pub fn clear(&mut self) {
        self.buffer.clear()
    }
}

impl<T> Default for VectorQueue<T> {
    fn default() -> Self {
        Self::new()
    }
}

impl ObjectQueue for VectorQueue<ObjectReference> {
    fn enqueue(&mut self, v: ObjectReference) {
        self.push(v);
    }
}
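
// Illustrative sketch (not part of the original module): a small test showing how
// `VectorQueue` behaves as a buffer. It uses `usize` elements so it does not need
// to construct real `ObjectReference`s.
#[cfg(test)]
mod vector_queue_example {
    use super::*;

    #[test]
    fn push_take_cycle() {
        let mut queue = VectorQueue::<usize>::new();
        assert!(queue.is_empty());

        // The first push reserves `CAPACITY` elements up front.
        queue.push(42);
        assert_eq!(queue.len(), 1);
        assert!(!queue.is_full());

        // `take` hands back the underlying vector and leaves the queue empty.
        // This is the same pattern `ObjectsClosure::flush` uses to drain its buffer.
        let drained = queue.take();
        assert_eq!(drained, vec![42]);
        assert!(queue.is_empty());
    }
}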

/// A transitive closure visitor that collects the slots of objects.
/// It maintains a buffer for the slots, and flushes the slots to a new work packet
/// when the buffer is full or when the closure is dropped.
pub struct ObjectsClosure<'a, E: ProcessEdgesWork> {
    buffer: VectorQueue<SlotOf<E>>,
    pub(crate) worker: &'a mut GCWorker<E::VM>,
    bucket: WorkBucketStage,
}

impl<'a, E: ProcessEdgesWork> ObjectsClosure<'a, E> {
    /// Create an [`ObjectsClosure`].
    ///
    /// Arguments:
    /// * `worker`: the current worker. The objects closure should not leave the context of this worker.
    /// * `bucket`: newly generated work will be pushed to this bucket.
    pub fn new(worker: &'a mut GCWorker<E::VM>, bucket: WorkBucketStage) -> Self {
        Self {
            buffer: VectorQueue::new(),
            worker,
            bucket,
        }
    }

    fn flush(&mut self) {
        let buf = self.buffer.take();
        if !buf.is_empty() {
            self.worker.add_work(
                self.bucket,
                E::new(buf, false, self.worker.mmtk, self.bucket),
            );
        }
    }
}

impl<E: ProcessEdgesWork> SlotVisitor<SlotOf<E>> for ObjectsClosure<'_, E> {
    fn visit_slot(&mut self, slot: SlotOf<E>) {
        #[cfg(debug_assertions)]
        {
            use crate::vm::slot::Slot;
            trace!(
                "(ObjectsClosure) Visit slot {:?} (pointing to {:?})",
                slot,
                slot.load()
            );
        }
        self.buffer.push(slot);
        if self.buffer.is_full() {
            self.flush();
        }
    }
}

impl<E: ProcessEdgesWork> Drop for ObjectsClosure<'_, E> {
    fn drop(&mut self) {
        self.flush();
    }
}
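
// Illustrative sketch (hypothetical helper, not part of the original module): how
// scan work typically drives an `ObjectsClosure`. The closure is passed to
// `Scanning::scan_object` as a `SlotVisitor`; any slots still buffered when it goes
// out of scope are flushed into a new work packet by the `Drop` implementation above.
#[allow(dead_code)]
fn scan_with_objects_closure<E: ProcessEdgesWork>(
    object: ObjectReference,
    tls: VMWorkerThread,
    worker: &mut GCWorker<E::VM>,
    bucket: WorkBucketStage,
) {
    let mut closure = ObjectsClosure::<E>::new(worker, bucket);
    <<E::VM as VMBinding>::VMScanning as Scanning<E::VM>>::scan_object(tls, object, &mut closure);
    // `closure` is dropped here, flushing any slots that remain in its buffer.
}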

/// For iterating over the slots of an object.
// FIXME: This type iterates slots, but all of its current use cases only care about the values in the slots.
// And it currently only works if the object supports slot enqueuing (i.e. `Scanning::scan_object` is implemented).
// We may refactor the interface according to <https://github.com/mmtk/mmtk-core/issues/1375>
pub(crate) struct SlotIterator<VM: VMBinding> {
    _p: PhantomData<VM>,
}

impl<VM: VMBinding> SlotIterator<VM> {
    /// Iterate over the slots of an object by applying a function to each slot.
    pub fn iterate_fields<F: FnMut(VM::VMSlot)>(object: ObjectReference, _tls: VMThread, mut f: F) {
        // FIXME: We should use tls from the arguments.
        // See https://github.com/mmtk/mmtk-core/issues/1375
        let fake_tls = VMWorkerThread(VMThread::UNINITIALIZED);
        if !<VM::VMScanning as Scanning<VM>>::support_slot_enqueuing(fake_tls, object) {
            panic!("SlotIterator::iterate_fields cannot be used on objects that don't support slot-enqueuing");
        }
        <VM::VMScanning as Scanning<VM>>::scan_object(fake_tls, object, &mut f);
    }
}
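
// Illustrative sketch (hypothetical helper, not part of the original module): using
// `SlotIterator::iterate_fields` to collect the objects referenced from an object's
// slots, which is what its current call sites mostly care about (see the FIXME above).
#[allow(dead_code)]
fn collect_outgoing_references<VM: VMBinding>(
    object: ObjectReference,
    tls: VMThread,
) -> Vec<ObjectReference> {
    use crate::vm::slot::Slot;
    let mut targets = Vec::new();
    SlotIterator::<VM>::iterate_fields(object, tls, |slot| {
        // Skip slots that do not currently hold an object reference.
        if let Some(target) = slot.load() {
            targets.push(target);
        }
    });
    targets
}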