use crate::plan::VectorObjectQueue;
use crate::policy::compressor::forwarding;
use crate::policy::gc_work::{TraceKind, TRACE_KIND_TRANSITIVE_PIN};
use crate::policy::largeobjectspace::LargeObjectSpace;
use crate::policy::sft::{GCWorkerMutRef, SFT};
use crate::policy::space::{CommonSpace, Space};
use crate::scheduler::{GCWork, GCWorkScheduler, GCWorker, WorkBucketStage};
use crate::util::copy::CopySemantics;
use crate::util::heap::regionpageresource::AllocatedRegion;
use crate::util::heap::{PageResource, RegionPageResource};
use crate::util::linear_scan::Region;
use crate::util::metadata::extract_side_metadata;
#[cfg(feature = "vo_bit")]
use crate::util::metadata::vo_bit;
use crate::util::metadata::MetadataSpec;
use crate::util::object_enum::{self, ObjectEnumerator};
use crate::util::{Address, ObjectReference};
use crate::vm::slot::Slot;
use crate::MMTK;
use crate::{vm::*, ObjectQueue};
use atomic::Ordering;
use std::sync::Arc;
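// The Compressor traces the heap twice: a marking pass which computes the set
// of live objects, and a forwarding pass which updates roots once forwarding
// addresses can be derived from the offset vectors.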
pub(crate) const TRACE_KIND_MARK: TraceKind = 0;
pub(crate) const TRACE_KIND_FORWARD_ROOT: TraceKind = 1;
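/// A mark-compact space in the style of the Compressor. Liveness is recorded
/// in a side mark bitmap, forwarding addresses are computed on demand from a
/// per-region offset vector rather than being stored in object headers, and
/// live objects are slid towards the start of each region during compaction.
///
/// A collection proceeds roughly as follows (the plan drives these steps):
/// marking traces set mark bits via `trace_mark_object`, then
/// `add_offset_vector_tasks` schedules offset-vector calculation for each
/// region, roots are forwarded via `trace_forward_root`, and finally
/// `add_compact_tasks` schedules the per-region `Compact` work packets.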
pub struct CompressorSpace<VM: VMBinding> {
common: CommonSpace<VM>,
pr: RegionPageResource<VM, forwarding::CompressorRegion>,
forwarding: forwarding::ForwardingMetadata<VM>,
scheduler: Arc<GCWorkScheduler<VM>>,
}
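/// Mask selecting the mark bit within the side mark metadata byte.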
pub(crate) const GC_MARK_BIT_MASK: u8 = 1;
impl<VM: VMBinding> SFT for CompressorSpace<VM> {
fn name(&self) -> &'static str {
self.get_name()
}
fn get_forwarded_object(&self, object: ObjectReference) -> Option<ObjectReference> {
if self.forwarding.has_calculated_forwarding_addresses() {
Some(self.forward(object, false))
} else {
None
}
}
fn is_live(&self, object: ObjectReference) -> bool {
Self::is_marked(object)
}
#[cfg(feature = "object_pinning")]
fn pin_object(&self, _object: ObjectReference) -> bool {
panic!("Cannot pin/unpin objects of CompressorSpace.")
}
#[cfg(feature = "object_pinning")]
fn unpin_object(&self, _object: ObjectReference) -> bool {
panic!("Cannot pin/unpin objects of CompressorSpace.")
}
#[cfg(feature = "object_pinning")]
fn is_object_pinned(&self, _object: ObjectReference) -> bool {
false
}
fn is_movable(&self) -> bool {
true
}
fn initialize_object_metadata(&self, _object: ObjectReference) {
#[cfg(feature = "vo_bit")]
crate::util::metadata::vo_bit::set_vo_bit(_object);
}
#[cfg(feature = "sanity")]
fn is_sane(&self) -> bool {
true
}
#[cfg(feature = "is_mmtk_object")]
fn is_mmtk_object(&self, addr: Address) -> Option<ObjectReference> {
crate::util::metadata::vo_bit::is_vo_bit_set_for_addr(addr)
}
#[cfg(feature = "is_mmtk_object")]
fn find_object_from_internal_pointer(
&self,
ptr: Address,
max_search_bytes: usize,
) -> Option<ObjectReference> {
crate::util::metadata::vo_bit::find_object_from_internal_pointer::<VM>(
ptr,
max_search_bytes,
)
}
fn sft_trace_object(
&self,
_queue: &mut VectorObjectQueue,
_object: ObjectReference,
_worker: GCWorkerMutRef,
) -> ObjectReference {
panic!("sft_trace_object() cannot be used with CompressorSpace")
}
fn debug_print_object_info(&self, object: ObjectReference) {
println!("marked = {}", CompressorSpace::<VM>::is_marked(object));
println!("forwarding = {:?}", self.get_forwarded_object(object));
self.common.debug_print_object_global_info(object);
}
}
impl<VM: VMBinding> Space<VM> for CompressorSpace<VM> {
fn as_space(&self) -> &dyn Space<VM> {
self
}
fn as_sft(&self) -> &(dyn SFT + Sync + 'static) {
self
}
fn get_page_resource(&self) -> &dyn PageResource<VM> {
&self.pr
}
fn maybe_get_page_resource_mut(&mut self) -> Option<&mut dyn PageResource<VM>> {
Some(&mut self.pr)
}
fn common(&self) -> &CommonSpace<VM> {
&self.common
}
fn initialize_sft(&self, sft_map: &mut dyn crate::policy::sft_map::SFTMap) {
self.common().initialize_sft(self.as_sft(), sft_map)
}
fn release_multiple_pages(&mut self, _start: Address) {
panic!("compressorspace only releases pages enmasse")
}
fn enumerate_objects(&self, enumerator: &mut dyn ObjectEnumerator) {
self.pr.enumerate(enumerator);
}
fn clear_side_log_bits(&self) {
unimplemented!()
}
fn set_side_log_bits(&self) {
unimplemented!()
}
}
impl<VM: VMBinding> crate::policy::gc_work::PolicyTraceObject<VM> for CompressorSpace<VM> {
fn trace_object<Q: ObjectQueue, const KIND: crate::policy::gc_work::TraceKind>(
&self,
queue: &mut Q,
object: ObjectReference,
_copy: Option<CopySemantics>,
_worker: &mut GCWorker<VM>,
) -> ObjectReference {
debug_assert!(
KIND != TRACE_KIND_TRANSITIVE_PIN,
"Compressor does not support transitive pin trace."
);
if KIND == TRACE_KIND_MARK {
self.trace_mark_object(queue, object)
} else if KIND == TRACE_KIND_FORWARD_ROOT {
self.trace_forward_root(queue, object)
} else {
unreachable!()
}
}
fn may_move_objects<const KIND: crate::policy::gc_work::TraceKind>() -> bool {
if KIND == TRACE_KIND_MARK {
false
} else if KIND == TRACE_KIND_FORWARD_ROOT {
true
} else {
unreachable!()
}
}
}
impl<VM: VMBinding> CompressorSpace<VM> {
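/// Create a new `CompressorSpace`. The Compressor reconstructs object
/// references from raw addresses, so it requires the binding to use a unified
/// object reference address model.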
pub fn new(args: crate::policy::space::PlanCreateSpaceArgs<VM>) -> Self {
let vm_map = args.vm_map;
assert!(
VM::VMObjectModel::UNIFIED_OBJECT_REFERENCE_ADDRESS,
"The Compressor requires a unified object reference address model"
);
let local_specs = extract_side_metadata(&[
MetadataSpec::OnSide(forwarding::MARK_SPEC),
MetadataSpec::OnSide(forwarding::OFFSET_VECTOR_SPEC),
]);
let is_discontiguous = args.vmrequest.is_discontiguous();
let scheduler = args.scheduler.clone();
let common = CommonSpace::new(args.into_policy_args(true, false, local_specs));
CompressorSpace {
pr: if is_discontiguous {
RegionPageResource::new_discontiguous(vm_map)
} else {
RegionPageResource::new_contiguous(common.start, common.extent, vm_map)
},
forwarding: forwarding::ForwardingMetadata::new(),
common,
scheduler,
}
}
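/// Zero the mark bitmaps of all allocated regions at the start of a collection.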
pub fn prepare(&self) {
self.pr
.enumerate_regions(&mut |r: &AllocatedRegion<forwarding::CompressorRegion>| {
forwarding::MARK_SPEC
.bzero_metadata(r.region.start(), r.region.end() - r.region.start());
});
}
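/// Reset the per-collection forwarding state at the end of a collection.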
pub fn release(&self) {
self.forwarding.release();
}
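/// Mark `object` if it has not been marked already. Newly marked objects are
/// enqueued for scanning, and the last word of each object is recorded in the
/// mark bitmap for the later offset-vector calculation.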
pub fn trace_mark_object<Q: ObjectQueue>(
&self,
queue: &mut Q,
object: ObjectReference,
) -> ObjectReference {
#[cfg(feature = "vo_bit")]
debug_assert!(
crate::util::metadata::vo_bit::is_vo_bit_set(object),
"{:x}: VO bit not set",
object
);
if CompressorSpace::<VM>::test_and_mark(object) {
queue.enqueue(object);
self.forwarding.mark_last_word_of_object(object);
}
object
}
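/// Forward a root. This trace runs after offset vectors have been calculated,
/// so the forwarding address of `object` can be computed directly.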
pub fn trace_forward_root<Q: ObjectQueue>(
&self,
_queue: &mut Q,
object: ObjectReference,
) -> ObjectReference {
self.forward(object, true)
}
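/// Atomically set the mark bit of `object`, returning `true` if this call
/// marked the object (i.e. it was previously unmarked).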
pub fn test_and_mark(object: ObjectReference) -> bool {
let old = forwarding::MARK_SPEC.fetch_or_atomic(
object.to_raw_address(),
GC_MARK_BIT_MASK,
Ordering::SeqCst,
);
(old & GC_MARK_BIT_MASK) == 0
}
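/// Check whether the mark bit of `object` is set.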
pub fn is_marked(object: ObjectReference) -> bool {
let old_value =
forwarding::MARK_SPEC.load_atomic::<u8>(object.to_raw_address(), Ordering::SeqCst);
let mark_bit = old_value & GC_MARK_BIT_MASK;
mark_bit != 0
}
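/// Build one work packet per allocated region using the supplied factory,
/// which receives the region and its index.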
fn generate_tasks(
&self,
f: &mut impl FnMut(&AllocatedRegion<forwarding::CompressorRegion>, usize) -> Box<dyn GCWork<VM>>,
) -> Vec<Box<dyn GCWork<VM>>> {
let mut packets = vec![];
let mut index = 0;
self.pr.enumerate_regions(&mut |r| {
packets.push(f(r, index));
index += 1;
});
packets
}
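/// Schedule a `CalculateOffsetVector` packet for each region in the
/// `CalculateForwarding` bucket.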
pub fn add_offset_vector_tasks(&'static self) {
let offset_vector_packets: Vec<Box<dyn GCWork<VM>>> = self.generate_tasks(&mut |r, _| {
Box::new(CalculateOffsetVector::<VM>::new(self, r.region, r.cursor()))
});
self.scheduler.work_buckets[WorkBucketStage::CalculateForwarding]
.bulk_add(offset_vector_packets);
}
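/// Calculate the offset vector of a single region, up to its allocation cursor.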
pub fn calculate_offset_vector_for_region(
&self,
region: forwarding::CompressorRegion,
cursor: Address,
) {
self.forwarding.calculate_offset_vector(region, cursor);
}
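/// Compute the forwarding address of `object`. References outside this space
/// are returned unchanged, so this may be called on arbitrary references while
/// updating fields. `_vo_bit_valid` indicates whether the VO bit of `object`
/// is still expected to be set; when true it is checked under debug assertions.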
pub fn forward(&self, object: ObjectReference, _vo_bit_valid: bool) -> ObjectReference {
if !self.in_space(object) {
return object;
}
#[cfg(feature = "vo_bit")]
if _vo_bit_valid {
debug_assert!(
crate::util::metadata::vo_bit::is_vo_bit_set(object),
"{:x}: VO bit not set",
object
);
}
ObjectReference::from_raw_address(self.forwarding.forward(object.to_raw_address())).unwrap()
}
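/// Forward every reference field of `object`, using slot enqueuing when the
/// binding supports it for this object and falling back to
/// `scan_object_and_trace_edges` otherwise.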
fn update_references(&self, worker: &mut GCWorker<VM>, object: ObjectReference) {
if VM::VMScanning::support_slot_enqueuing(worker.tls, object) {
VM::VMScanning::scan_object(worker.tls, object, &mut |s: VM::VMSlot| {
if let Some(o) = s.load() {
s.store(self.forward(o, false));
}
});
} else {
VM::VMScanning::scan_object_and_trace_edges(worker.tls, object, &mut |o| {
self.forward(o, false)
});
}
}
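/// Schedule a `Compact` packet for each region in the `Compact` bucket.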
pub fn add_compact_tasks(&'static self) {
let compact_packets: Vec<Box<dyn GCWork<VM>>> =
self.generate_tasks(&mut |_, i| Box::new(Compact::<VM>::new(self, i)));
self.scheduler.work_buckets[WorkBucketStage::Compact].bulk_add(compact_packets);
}
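/// Slide the live objects of the region at `index` to their forwarding
/// addresses, updating the references they contain as they are copied, and
/// reset the region's cursor to the end of the last copied object.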
pub fn compact_region(&self, worker: &mut GCWorker<VM>, index: usize) {
self.pr.with_regions(&mut |regions| {
let r = ®ions[index];
let start = r.region.start();
let end = r.cursor();
#[cfg(feature = "vo_bit")]
{
#[cfg(debug_assertions)]
self.forwarding
.scan_marked_objects(start, end, &mut |object: ObjectReference| {
debug_assert!(
crate::util::metadata::vo_bit::is_vo_bit_set(object),
"{:x}: VO bit not set",
object
);
});
crate::util::metadata::vo_bit::bzero_vo_bit(start, end - start);
}
let mut to = start;
self.forwarding
.scan_marked_objects(start, end, &mut |obj: ObjectReference| {
let copied_size = VM::VMObjectModel::get_size_when_copied(obj);
debug_assert!(copied_size == VM::VMObjectModel::get_current_size(obj));
let new_object = self.forward(obj, false);
debug_assert!(
new_object.to_raw_address() >= to,
"whilst forwarding {obj}, the new address {0} should be after the end of the last object {to}",
new_object.to_raw_address()
);
trace!(" copy from {} to {}", obj, new_object);
let end_of_new_object =
VM::VMObjectModel::copy_to(obj, new_object, Address::ZERO);
#[cfg(feature = "vo_bit")]
vo_bit::set_vo_bit(new_object);
to = new_object.to_object_start::<VM>() + copied_size;
debug_assert_eq!(end_of_new_object, to);
self.update_references(worker, new_object);
});
self.pr.reset_cursor(r, to);
});
}
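/// After compaction, reset the allocator of this space and update references
/// held by objects in the large object space, which may point into this space.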
pub fn after_compact(&self, worker: &mut GCWorker<VM>, los: &LargeObjectSpace<VM>) {
self.pr.reset_allocator();
los.enumerate_objects(&mut object_enum::ClosureObjectEnumerator::<_, VM>::new(
&mut |o: ObjectReference| {
self.update_references(worker, o);
},
));
}
}
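/// Work packet which calculates the offset vector for a single region.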
pub struct CalculateOffsetVector<VM: VMBinding> {
compressor_space: &'static CompressorSpace<VM>,
region: forwarding::CompressorRegion,
cursor: Address,
}
impl<VM: VMBinding> GCWork<VM> for CalculateOffsetVector<VM> {
fn do_work(&mut self, _worker: &mut GCWorker<VM>, _mmtk: &'static MMTK<VM>) {
self.compressor_space
.calculate_offset_vector_for_region(self.region, self.cursor);
}
}
impl<VM: VMBinding> CalculateOffsetVector<VM> {
pub fn new(
compressor_space: &'static CompressorSpace<VM>,
region: forwarding::CompressorRegion,
cursor: Address,
) -> Self {
Self {
compressor_space,
region,
cursor,
}
}
}
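/// Work packet which compacts a single region and updates the references of
/// the objects it moves.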
pub struct Compact<VM: VMBinding> {
compressor_space: &'static CompressorSpace<VM>,
index: usize,
}
impl<VM: VMBinding> GCWork<VM> for Compact<VM> {
fn do_work(&mut self, worker: &mut GCWorker<VM>, _mmtk: &'static MMTK<VM>) {
self.compressor_space.compact_region(worker, self.index);
}
}
impl<VM: VMBinding> Compact<VM> {
pub fn new(compressor_space: &'static CompressorSpace<VM>, index: usize) -> Self {
Self {
compressor_space,
index,
}
}
}