//! This module contains code useful for tracing,
//! i.e. visiting the reachable objects by traversing all or part of an object graph.
use crate::scheduler::gc_work::{ProcessEdgesWork, SlotOf};
use crate::scheduler::{GCWorker, WorkBucketStage};
use crate::util::ObjectReference;
use crate::vm::SlotVisitor;
/// This trait represents an object queue to enqueue objects during tracing.
///
/// Implementors decide how enqueued objects are stored; see [`VectorQueue`]
/// in this module for a `Vec`-backed implementation.
pub trait ObjectQueue {
    /// Enqueue an object into the queue.
    fn enqueue(&mut self, object: ObjectReference);
}
/// A vector queue for object references.
///
/// Shorthand for [`VectorQueue`] specialized to [`ObjectReference`].
pub type VectorObjectQueue = VectorQueue<ObjectReference>;
/// An implementation of `ObjectQueue` using a `Vec`.
///
/// This can also be used as a buffer. For example, the mark stack or the write barrier mod-buffer.
pub struct VectorQueue<T> {
    /// Enqueued nodes.
    buffer: Vec<T>,
}
impl<T> VectorQueue<T> {
    /// Reserve this much capacity on the first enqueue to avoid frequent resizing.
    const CAPACITY: usize = 4096;
    /// Create an empty queue. No heap space is reserved until the first push.
    pub fn new() -> Self {
        VectorQueue { buffer: Vec::new() }
    }
    /// Return `true` if the queue holds no elements.
    pub fn is_empty(&self) -> bool {
        self.buffer.is_empty()
    }
    /// Check if the buffer size reaches `CAPACITY`.
    pub fn is_full(&self) -> bool {
        Self::CAPACITY <= self.buffer.len()
    }
    /// Move the contents out as a `Vec`, leaving this queue empty.
    pub fn take(&mut self) -> Vec<T> {
        std::mem::replace(&mut self.buffer, Vec::new())
    }
    /// Consume this queue and return its underlying vector.
    pub fn into_vec(self) -> Vec<T> {
        let VectorQueue { buffer } = self;
        buffer
    }
    /// Append an element. On a push into an empty queue, `CAPACITY` slots are
    /// reserved up front so that subsequent pushes (up to `CAPACITY` elements)
    /// do not reallocate. Nothing here prevents growth past `CAPACITY`; the
    /// caller is expected to consult `is_full` and drain the queue in time.
    pub fn push(&mut self, elem: T) {
        if self.buffer.is_empty() {
            // First element since creation or the last take(): pre-size the buffer.
            self.buffer.reserve(Self::CAPACITY);
        }
        self.buffer.push(elem);
    }
}
impl<T> Default for VectorQueue<T> {
    fn default() -> Self {
        VectorQueue::new()
    }
}
impl ObjectQueue for VectorQueue<ObjectReference> {
    /// Enqueue an object by delegating to [`VectorQueue::push`].
    fn enqueue(&mut self, object: ObjectReference) {
        self.push(object)
    }
}
/// A transitive closure visitor that collects the slots found in objects.
/// Slots are accumulated in a local buffer and shipped off as a new work
/// packet when the buffer fills up or when this value is dropped.
pub struct ObjectsClosure<'a, E: ProcessEdgesWork> {
    /// Local slot buffer, drained into work packets by `flush`.
    buffer: VectorQueue<SlotOf<E>>,
    /// The worker this closure runs on; generated work is handed to it.
    pub(crate) worker: &'a mut GCWorker<E::VM>,
    /// The bucket that newly created work packets are placed in.
    bucket: WorkBucketStage,
}
impl<'a, E: ProcessEdgesWork> ObjectsClosure<'a, E> {
    /// Create an [`ObjectsClosure`].
    ///
    /// Arguments:
    /// * `worker`: the current worker. The objects closure should not leave the context of this worker.
    /// * `bucket`: new work generated will be pushed to the bucket.
    pub fn new(worker: &'a mut GCWorker<E::VM>, bucket: WorkBucketStage) -> Self {
        ObjectsClosure {
            buffer: VectorQueue::new(),
            worker,
            bucket,
        }
    }
    /// Drain the buffer into a freshly created `E` work packet. A no-op when
    /// the buffer is empty, so no empty packets are ever scheduled.
    fn flush(&mut self) {
        let slots = self.buffer.take();
        if slots.is_empty() {
            return;
        }
        let packet = E::new(slots, false, self.worker.mmtk, self.bucket);
        self.worker.add_work(self.bucket, packet);
    }
}
impl<'a, E: ProcessEdgesWork> SlotVisitor<SlotOf<E>> for ObjectsClosure<'a, E> {
    /// Record one slot: append it to the local buffer, and flush the buffer
    /// into a new work packet once it reaches capacity.
    fn visit_slot(&mut self, slot: SlotOf<E>) {
        // Debug builds only: log each visited slot and the object it points to.
        #[cfg(debug_assertions)]
        {
            use crate::vm::slot::Slot;
            trace!(
                "(ObjectsClosure) Visit slot {:?} (pointing to {:?})",
                slot,
                slot.load()
            );
        }
        self.buffer.push(slot);
        if !self.buffer.is_full() {
            return;
        }
        self.flush();
    }
}
impl<'a, E: ProcessEdgesWork> Drop for ObjectsClosure<'a, E> {
    /// Flush any slots still sitting in the buffer when the closure goes out
    /// of scope, so no discovered slots are lost.
    fn drop(&mut self) {
        self.flush();
    }
}