// mmtk/plan/tracing.rs
//! This module contains code useful for tracing,
//! i.e. visiting the reachable objects by traversing all or part of an object graph.
use std::marker::PhantomData;
use crate::scheduler::gc_work::{ProcessEdgesWork, SlotOf};
use crate::scheduler::{GCWorker, WorkBucketStage, EDGES_WORK_BUFFER_SIZE};
use crate::util::{ObjectReference, VMThread, VMWorkerThread};
use crate::vm::{Scanning, SlotVisitor, VMBinding};
/// This trait represents an object queue to enqueue objects during tracing.
///
/// Implementors receive objects as they are discovered reachable; see
/// [`VectorQueue`] for the standard vector-backed implementation.
pub trait ObjectQueue {
/// Enqueue an object into the queue.
///
/// Called once per discovered object during a trace.
fn enqueue(&mut self, object: ObjectReference);
}
/// A vector queue for object references.
pub type VectorObjectQueue = VectorQueue<ObjectReference>;
/// An implementation of `ObjectQueue` using a `Vec`.
///
/// This can also be used as a buffer. For example, the mark stack or the write barrier mod-buffer.
pub struct VectorQueue<T> {
/// Enqueued nodes. Empty until the first push; capacity is reserved lazily
/// (see `VectorQueue::push`).
buffer: Vec<T>,
}
impl<T> VectorQueue<T> {
    /// Capacity reserved on the first push, so the buffer is not resized
    /// repeatedly while it fills up.
    const CAPACITY: usize = EDGES_WORK_BUFFER_SIZE;
    /// Construct an empty `VectorQueue`. No heap memory is allocated until
    /// the first element is pushed.
    pub fn new() -> Self {
        Self { buffer: Vec::new() }
    }
    /// Whether the queue currently holds no elements.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
    /// Move the contents out as a `Vec`, leaving the queue empty.
    pub fn take(&mut self) -> Vec<T> {
        std::mem::replace(&mut self.buffer, Vec::new())
    }
    /// Consume the queue and yield its underlying vector.
    pub fn into_vec(self) -> Vec<T> {
        self.buffer
    }
    /// Whether the number of buffered elements has reached `CAPACITY`.
    pub fn is_full(&self) -> bool {
        self.len() >= Self::CAPACITY
    }
    /// Append an element to the queue.
    ///
    /// On the first push into an empty queue, `CAPACITY` slots are reserved
    /// up front. The caller is responsible for keeping the length within
    /// `CAPACITY` (typically by checking [`VectorQueue::is_full`]); this
    /// method itself never enforces the limit, and exceeding it simply
    /// triggers a regular `Vec` reallocation.
    pub fn push(&mut self, v: T) {
        if self.is_empty() {
            self.buffer.reserve(Self::CAPACITY);
        }
        self.buffer.push(v);
    }
    /// Number of elements currently in the queue.
    pub fn len(&self) -> usize {
        self.buffer.len()
    }
    /// Remove all elements, keeping any reserved capacity.
    pub fn clear(&mut self) {
        self.buffer.clear()
    }
}
impl<T> Default for VectorQueue<T> {
fn default() -> Self {
Self::new()
}
}
impl ObjectQueue for VectorQueue<ObjectReference> {
    /// Enqueue by appending to the underlying vector buffer.
    fn enqueue(&mut self, object: ObjectReference) {
        self.push(object);
    }
}
/// A transitive closure visitor to collect the slots from objects.
/// It maintains a buffer for the slots, and flushes slots to a new work packet
/// if the buffer is full or if the type gets dropped.
pub struct ObjectsClosure<'a, E: ProcessEdgesWork> {
/// Buffered slots awaiting flush into a work packet.
buffer: VectorQueue<SlotOf<E>>,
/// The worker this closure runs on; new work packets are added via this worker.
pub(crate) worker: &'a mut GCWorker<E::VM>,
/// The work-bucket stage that flushed packets are added to.
bucket: WorkBucketStage,
}
impl<'a, E: ProcessEdgesWork> ObjectsClosure<'a, E> {
    /// Construct an [`ObjectsClosure`].
    ///
    /// Arguments:
    /// * `worker`: the current worker. The objects closure should not leave the context of this worker.
    /// * `bucket`: newly generated work will be pushed to this bucket.
    pub fn new(worker: &'a mut GCWorker<E::VM>, bucket: WorkBucketStage) -> Self {
        let buffer = VectorQueue::new();
        Self { buffer, worker, bucket }
    }
    /// Drain the buffered slots into a new `ProcessEdgesWork` packet and hand
    /// it to the worker. Does nothing if no slots have been buffered.
    fn flush(&mut self) {
        let slots = self.buffer.take();
        if slots.is_empty() {
            return;
        }
        let packet = E::new(slots, false, self.worker.mmtk, self.bucket);
        self.worker.add_work(self.bucket, packet);
    }
}
impl<E: ProcessEdgesWork> SlotVisitor<SlotOf<E>> for ObjectsClosure<'_, E> {
    /// Buffer the slot; once the buffer reaches capacity, flush it into a
    /// new work packet so the buffer never grows beyond its reserved size.
    fn visit_slot(&mut self, slot: SlotOf<E>) {
        #[cfg(debug_assertions)]
        {
            use crate::vm::slot::Slot;
            trace!("(ObjectsClosure) Visit slot {:?} (pointing to {:?})", slot, slot.load());
        }
        self.buffer.push(slot);
        if !self.buffer.is_full() {
            return;
        }
        self.flush();
    }
}
impl<E: ProcessEdgesWork> Drop for ObjectsClosure<'_, E> {
/// Flush any remaining buffered slots when the closure goes out of scope,
/// so no collected slots are lost.
fn drop(&mut self) {
self.flush();
}
}
/// For iterating over the slots of an object.
// FIXME: This type iterates slots, but all of its current use cases only care about the values in the slots.
// And it currently only works if the object supports slot enqueuing (i.e. `Scanning::scan_object` is implemented).
// We may refactor the interface according to <https://github.com/mmtk/mmtk-core/issues/1375>
pub(crate) struct SlotIterator<VM: VMBinding> {
/// Zero-sized marker tying the iterator to a specific VM binding.
_p: PhantomData<VM>,
}
impl<VM: VMBinding> SlotIterator<VM> {
    /// Iterate over the slots of an object by applying a function to each slot.
    ///
    /// # Panics
    ///
    /// Panics if the object does not support slot enqueuing, i.e.
    /// `Scanning::support_slot_enqueuing` returns `false` for it.
    pub fn iterate_fields<F: FnMut(VM::VMSlot)>(object: ObjectReference, _tls: VMThread, mut f: F) {
        // FIXME: We should use tls from the arguments.
        // See https://github.com/mmtk/mmtk-core/issues/1375
        let worker_tls = VMWorkerThread(VMThread::UNINITIALIZED);
        assert!(
            <VM::VMScanning as Scanning<VM>>::support_slot_enqueuing(worker_tls, object),
            "SlotIterator::iterate_fields cannot be used on objects that don't support slot-enqueuing"
        );
        <VM::VMScanning as Scanning<VM>>::scan_object(worker_tls, object, &mut f);
    }
}