use std::ops::Range;
use super::sft::SFT;
use super::space::{CommonSpace, Space};
use crate::plan::VectorObjectQueue;
use crate::policy::gc_work::{TraceKind, TRACE_KIND_TRANSITIVE_PIN};
use crate::policy::sft::GCWorkerMutRef;
use crate::scheduler::GCWorker;
use crate::util::alloc::allocator::align_allocation_no_fill;
use crate::util::constants::LOG_BYTES_IN_WORD;
use crate::util::copy::CopySemantics;
use crate::util::heap::{MonotonePageResource, PageResource};
use crate::util::metadata::{extract_side_metadata, vo_bit};
use crate::util::object_enum::{self, ObjectEnumerator};
use crate::util::{Address, ObjectReference};
use crate::{vm::*, ObjectQueue};
use atomic::Ordering;
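/// Trace kind for the marking pass, which marks live objects without moving them.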
pub(crate) const TRACE_KIND_MARK: TraceKind = 0;
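/// Trace kind for the forwarding pass, which updates references to point to the objects' new locations.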
pub(crate) const TRACE_KIND_FORWARD: TraceKind = 1;
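/// A space collected by mark-compact: live objects are marked, assigned forwarding
/// pointers stored in an extra header word, and then compacted into the beginning of
/// the space's allocated regions.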
pub struct MarkCompactSpace<VM: VMBinding> {
common: CommonSpace<VM>,
pr: MonotonePageResource<VM>,
}
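/// Mask for the mark bit in the per-object local mark-bit metadata.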
const GC_MARK_BIT_MASK: u8 = 1;
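/// Each object carries one extra header word, which holds its forwarding pointer during compaction.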
pub const GC_EXTRA_HEADER_WORD: usize = 1;
const GC_EXTRA_HEADER_BYTES: usize = GC_EXTRA_HEADER_WORD << LOG_BYTES_IN_WORD;
impl<VM: VMBinding> SFT for MarkCompactSpace<VM> {
fn name(&self) -> &str {
self.get_name()
}
fn get_forwarded_object(&self, object: ObjectReference) -> Option<ObjectReference> {
Self::get_header_forwarding_pointer(object)
}
fn is_live(&self, object: ObjectReference) -> bool {
Self::is_marked(object)
}
#[cfg(feature = "object_pinning")]
fn pin_object(&self, _object: ObjectReference) -> bool {
panic!("Cannot pin/unpin objects of MarkCompactSpace.")
}
#[cfg(feature = "object_pinning")]
fn unpin_object(&self, _object: ObjectReference) -> bool {
panic!("Cannot pin/unpin objects of MarkCompactSpace.")
}
#[cfg(feature = "object_pinning")]
fn is_object_pinned(&self, _object: ObjectReference) -> bool {
false
}
fn is_movable(&self) -> bool {
true
}
fn initialize_object_metadata(&self, object: ObjectReference, _alloc: bool) {
crate::util::metadata::vo_bit::set_vo_bit(object);
}
#[cfg(feature = "sanity")]
fn is_sane(&self) -> bool {
true
}
#[cfg(feature = "is_mmtk_object")]
fn is_mmtk_object(&self, addr: Address) -> Option<ObjectReference> {
crate::util::metadata::vo_bit::is_vo_bit_set_for_addr(addr)
}
#[cfg(feature = "is_mmtk_object")]
fn find_object_from_internal_pointer(
&self,
ptr: Address,
max_search_bytes: usize,
) -> Option<ObjectReference> {
crate::util::metadata::vo_bit::find_object_from_internal_pointer::<VM>(
ptr,
max_search_bytes,
)
}
fn sft_trace_object(
&self,
_queue: &mut VectorObjectQueue,
_object: ObjectReference,
_worker: GCWorkerMutRef,
) -> ObjectReference {
panic!("sft_trace_object() cannot be used with mark compact space")
}
}
impl<VM: VMBinding> Space<VM> for MarkCompactSpace<VM> {
fn as_space(&self) -> &dyn Space<VM> {
self
}
fn as_sft(&self) -> &(dyn SFT + Sync + 'static) {
self
}
fn get_page_resource(&self) -> &dyn PageResource<VM> {
&self.pr
}
fn maybe_get_page_resource_mut(&mut self) -> Option<&mut dyn PageResource<VM>> {
Some(&mut self.pr)
}
fn common(&self) -> &CommonSpace<VM> {
&self.common
}
fn initialize_sft(&self, sft_map: &mut dyn crate::policy::sft_map::SFTMap) {
self.common().initialize_sft(self.as_sft(), sft_map)
}
fn release_multiple_pages(&mut self, _start: Address) {
panic!("MarkCompactSpace only releases pages en masse")
}
fn enumerate_objects(&self, enumerator: &mut dyn ObjectEnumerator) {
object_enum::enumerate_blocks_from_monotonic_page_resource(enumerator, &self.pr);
}
}
impl<VM: VMBinding> crate::policy::gc_work::PolicyTraceObject<VM> for MarkCompactSpace<VM> {
fn trace_object<Q: ObjectQueue, const KIND: crate::policy::gc_work::TraceKind>(
&self,
queue: &mut Q,
object: ObjectReference,
_copy: Option<CopySemantics>,
_worker: &mut GCWorker<VM>,
) -> ObjectReference {
debug_assert!(
KIND != TRACE_KIND_TRANSITIVE_PIN,
"MarkCompact does not support transitive pin trace."
);
if KIND == TRACE_KIND_MARK {
self.trace_mark_object(queue, object)
} else if KIND == TRACE_KIND_FORWARD {
self.trace_forward_object(queue, object)
} else {
unreachable!()
}
}
fn may_move_objects<const KIND: crate::policy::gc_work::TraceKind>() -> bool {
if KIND == TRACE_KIND_MARK {
false
} else if KIND == TRACE_KIND_FORWARD {
true
} else {
unreachable!()
}
}
}
impl<VM: VMBinding> MarkCompactSpace<VM> {
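/// Bytes reserved in front of each object for the header forwarding-pointer word:
/// the larger of the VM's maximum alignment and `GC_EXTRA_HEADER_BYTES`, rounded up
/// to a power of two.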
pub const HEADER_RESERVED_IN_BYTES: usize = if VM::MAX_ALIGNMENT > GC_EXTRA_HEADER_BYTES {
VM::MAX_ALIGNMENT
} else {
GC_EXTRA_HEADER_BYTES
}
.next_power_of_two();
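/// Address of the header forwarding-pointer word, located immediately before the object start.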
fn header_forwarding_pointer_address(object: ObjectReference) -> Address {
object.to_object_start::<VM>() - GC_EXTRA_HEADER_BYTES
}
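/// Load the header forwarding pointer, or `None` if the word is zero (no pointer stored).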
fn get_header_forwarding_pointer(object: ObjectReference) -> Option<ObjectReference> {
let addr = unsafe { Self::header_forwarding_pointer_address(object).load::<Address>() };
ObjectReference::from_raw_address(addr)
}
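/// Store a forwarding pointer into the object's header word.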
fn store_header_forwarding_pointer(
object: ObjectReference,
forwarding_pointer: ObjectReference,
) {
unsafe {
Self::header_forwarding_pointer_address(object)
.store::<ObjectReference>(forwarding_pointer);
}
}
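/// Zero the object's header forwarding-pointer word.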
fn clear_header_forwarding_pointer(object: ObjectReference) {
crate::util::memory::zero(
Self::header_forwarding_pointer_address(object),
GC_EXTRA_HEADER_BYTES,
);
}
pub fn new(args: crate::policy::space::PlanCreateSpaceArgs<VM>) -> Self {
let vm_map = args.vm_map;
let is_discontiguous = args.vmrequest.is_discontiguous();
let local_specs = extract_side_metadata(&[*VM::VMObjectModel::LOCAL_MARK_BIT_SPEC]);
let common = CommonSpace::new(args.into_policy_args(true, false, local_specs));
MarkCompactSpace {
pr: if is_discontiguous {
MonotonePageResource::new_discontiguous(vm_map)
} else {
MonotonePageResource::new_contiguous(common.start, common.extent, vm_map)
},
common,
}
}
pub fn prepare(&self) {}
pub fn release(&self) {}
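/// Trace an object during the marking pass: mark it, and enqueue it for scanning if
/// this is the first visit.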
pub fn trace_mark_object<Q: ObjectQueue>(
&self,
queue: &mut Q,
object: ObjectReference,
) -> ObjectReference {
debug_assert!(
crate::util::metadata::vo_bit::is_vo_bit_set(object),
"{:x}: VO bit not set",
object
);
if MarkCompactSpace::<VM>::test_and_mark(object) {
queue.enqueue(object);
}
object
}
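/// Trace an object during the forwarding pass: clear its mark bit (enqueueing it on the
/// first visit) and return the new reference stored in its header forwarding pointer.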
pub fn trace_forward_object<Q: ObjectQueue>(
&self,
queue: &mut Q,
object: ObjectReference,
) -> ObjectReference {
debug_assert!(
crate::util::metadata::vo_bit::is_vo_bit_set(object),
"{:x}: VO bit not set",
object
);
if MarkCompactSpace::<VM>::test_and_clear_mark(object) {
queue.enqueue(object);
}
Self::get_header_forwarding_pointer(object)
.unwrap_or_else(|| panic!("Object {object} does not have a forwarding pointer"))
}
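/// Atomically set the object's mark bit. Returns `true` if this call marked the object,
/// or `false` if it was already marked.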
pub fn test_and_mark(object: ObjectReference) -> bool {
loop {
let old_value = VM::VMObjectModel::LOCAL_MARK_BIT_SPEC.load_atomic::<VM, u8>(
object,
None,
Ordering::SeqCst,
);
let mark_bit = old_value & GC_MARK_BIT_MASK;
if mark_bit != 0 {
return false;
}
if VM::VMObjectModel::LOCAL_MARK_BIT_SPEC
.compare_exchange_metadata::<VM, u8>(
object,
old_value,
1,
None,
Ordering::SeqCst,
Ordering::SeqCst,
)
.is_ok()
{
break;
}
}
true
}
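/// Atomically clear the object's mark bit. Returns `true` if this call cleared the bit,
/// or `false` if the object was not marked.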
pub fn test_and_clear_mark(object: ObjectReference) -> bool {
loop {
let old_value = VM::VMObjectModel::LOCAL_MARK_BIT_SPEC.load_atomic::<VM, u8>(
object,
None,
Ordering::SeqCst,
);
let mark_bit = old_value & GC_MARK_BIT_MASK;
if mark_bit == 0 {
return false;
}
if VM::VMObjectModel::LOCAL_MARK_BIT_SPEC
.compare_exchange_metadata::<VM, u8>(
object,
old_value,
0,
None,
Ordering::SeqCst,
Ordering::SeqCst,
)
.is_ok()
{
break;
}
}
true
}
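/// Check whether the object's mark bit is set.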
pub fn is_marked(object: ObjectReference) -> bool {
let old_value = VM::VMObjectModel::LOCAL_MARK_BIT_SPEC.load_atomic::<VM, u8>(
object,
None,
Ordering::SeqCst,
);
let mark_bit = old_value & GC_MARK_BIT_MASK;
mark_bit != 0
}
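/// An object is moved during compaction iff it is marked (live).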
fn to_be_compacted(object: &ObjectReference) -> bool {
Self::is_marked(*object)
}
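/// Linearly scan an address range and iterate over the objects in it.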
fn linear_scan_objects(&self, range: Range<Address>) -> impl Iterator<Item = ObjectReference> {
crate::util::linear_scan::ObjectIterator::<VM, MarkCompactObjectSize<VM>, true>::new(
range.start,
range.end,
)
}
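/// First compaction pass: linearly scan the marked (live) objects in address order,
/// assign each one a destination address within this space's allocated regions, and
/// store that destination in the object's header forwarding-pointer word.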
pub fn calculate_forwarding_pointer(&self) {
let mut to_iter = self.pr.iterate_allocated_regions();
let Some((mut to_cursor, mut to_size)) = to_iter.next() else {
return;
};
let mut to_end = to_cursor + to_size;
for (from_start, size) in self.pr.iterate_allocated_regions() {
let from_end = from_start + size;
for obj in self
.linear_scan_objects(from_start..from_end)
.filter(Self::to_be_compacted)
{
let copied_size =
VM::VMObjectModel::get_size_when_copied(obj) + Self::HEADER_RESERVED_IN_BYTES;
let align = VM::VMObjectModel::get_align_when_copied(obj);
let offset = VM::VMObjectModel::get_align_offset_when_copied(obj);
to_cursor = align_allocation_no_fill::<VM>(to_cursor, align, offset);
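// The object does not fit in the current to-region: advance to the next region and re-align.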
if to_cursor + copied_size > to_end {
(to_cursor, to_size) = to_iter.next().unwrap();
to_end = to_cursor + to_size;
to_cursor = align_allocation_no_fill::<VM>(to_cursor, align, offset);
debug_assert!(to_cursor + copied_size <= to_end);
}
let new_obj = VM::VMObjectModel::get_reference_when_copied_to(
obj,
to_cursor + Self::HEADER_RESERVED_IN_BYTES,
);
Self::store_header_forwarding_pointer(obj, new_obj);
trace!(
"Calculate forward: {} (size when copied = {}) ~> {} (size = {})",
obj,
VM::VMObjectModel::get_size_when_copied(obj),
to_cursor,
copied_size
);
to_cursor += copied_size;
}
}
}
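/// Second compaction pass: copy every object that has a forwarding pointer to its new
/// location, clear the forwarding-pointer word, move the VO bit to the new location,
/// and finally reset the page-resource cursor to the end of the compacted data.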
pub fn compact(&self) {
let mut to = Address::ZERO;
for (from_start, size) in self.pr.iterate_allocated_regions() {
let from_end = from_start + size;
for obj in self.linear_scan_objects(from_start..from_end) {
let copied_size = VM::VMObjectModel::get_size_when_copied(obj);
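// Clear the VO bit at the old location; it is set again at the new location after
// copying, so dead (unforwarded) objects simply lose their VO bit.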
vo_bit::unset_vo_bit(obj);
let maybe_forwarding_pointer = Self::get_header_forwarding_pointer(obj);
if let Some(forwarding_pointer) = maybe_forwarding_pointer {
trace!("Compact {} to {}", obj, forwarding_pointer);
let new_object = forwarding_pointer;
Self::clear_header_forwarding_pointer(new_object);
trace!(" copy from {} to {}", obj, new_object);
let end_of_new_object =
VM::VMObjectModel::copy_to(obj, new_object, Address::ZERO);
vo_bit::set_vo_bit(new_object);
to = new_object.to_object_start::<VM>() + copied_size;
debug_assert_eq!(end_of_new_object, to);
} else {
trace!("Skipping dead object {}", obj);
}
}
}
debug!("Compact end: to = {}", to);
self.pr.reset_cursor(to);
}
}
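/// Object-size callback for the linear-scan iterator, backed by the VM object model's
/// current object size.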
struct MarkCompactObjectSize<VM>(std::marker::PhantomData<VM>);
impl<VM: VMBinding> crate::util::linear_scan::LinearScanObjectSize for MarkCompactObjectSize<VM> {
fn size(object: ObjectReference) -> usize {
VM::VMObjectModel::get_current_size(object)
}
}