// mmtk/plan/compressor/global.rs
use super::gc_work::CompressorWorkContext;
use super::gc_work::{
    AfterCompact, ForwardingProcessEdges, GenerateWork, MarkingProcessEdges, UpdateReferences,
};
use crate::plan::compressor::mutator::ALLOCATOR_MAPPING;
use crate::plan::global::CreateGeneralPlanArgs;
use crate::plan::global::CreateSpecificPlanArgs;
use crate::plan::global::{BasePlan, CommonPlan};
use crate::plan::plan_constraints::MAX_NON_LOS_ALLOC_BYTES_COPYING_PLAN;
use crate::plan::{AllocationSemantics, Plan, PlanConstraints};
use crate::policy::compressor::CompressorSpace;
use crate::policy::space::Space;
use crate::scheduler::gc_work::*;
use crate::scheduler::{GCWorkScheduler, WorkBucketStage};
use crate::util::alloc::allocators::AllocatorSelector;
use crate::util::heap::gc_trigger::SpaceStats;
#[allow(unused_imports)]
use crate::util::heap::VMRequest;
use crate::util::metadata::side_metadata::SideMetadataContext;
use crate::util::opaque_pointer::*;
use crate::vm::VMBinding;
use enum_map::EnumMap;
use mmtk_macros::{HasSpaces, PlanTraceObject};
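
/// `Compressor` is a stop-the-world plan that allocates into a single
/// [`CompressorSpace`] and compacts it in place: a collection marks the
/// live objects, computes their forwarding addresses with offset vectors,
/// updates references, and then slides objects to their new addresses,
/// in the style of the Compressor algorithm of Kermany and Petrank.
/// Large objects and other non-default allocations are handled by the
/// spaces of the [`CommonPlan`].
///
/// A binding selects the plan through the MMTk options rather than
/// constructing it directly. A minimal sketch, assuming a hypothetical
/// binding type `MyVM` and the `MMTKBuilder` API from mmtk-core:
///
/// ```ignore
/// let mut builder = mmtk::MMTKBuilder::new();
/// // set_option returns false if the option name or value is not recognised.
/// assert!(builder.set_option("plan", "Compressor"));
/// let mmtk = mmtk::memory_manager::mmtk_init::<MyVM>(&builder);
/// ```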
#[derive(HasSpaces, PlanTraceObject)]
pub struct Compressor<VM: VMBinding> {
    #[parent]
    pub common: CommonPlan<VM>,
    #[space]
    pub compressor_space: CompressorSpace<VM>,
}
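
/// The constraints of the Compressor plan: compaction moves objects, and
/// because a forwarding address is only known once the marking (liveness)
/// trace has completed, references must be forwarded in a separate pass
/// (`needs_forward_after_liveness`).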
pub const COMPRESSOR_CONSTRAINTS: PlanConstraints = PlanConstraints {
    max_non_los_default_alloc_bytes: MAX_NON_LOS_ALLOC_BYTES_COPYING_PLAN,
    moves_objects: true,
    needs_forward_after_liveness: true,
    ..PlanConstraints::default()
};

impl<VM: VMBinding> Plan for Compressor<VM> {
    fn constraints(&self) -> &'static PlanConstraints {
        &COMPRESSOR_CONSTRAINTS
    }

    fn collection_required(&self, space_full: bool, _space: Option<SpaceStats<Self::VM>>) -> bool {
        self.base().collection_required(self, space_full)
    }

    fn common(&self) -> &CommonPlan<VM> {
        &self.common
    }

    fn base(&self) -> &BasePlan<VM> {
        &self.common.base
    }

    fn base_mut(&mut self) -> &mut BasePlan<Self::VM> {
        &mut self.common.base
    }
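
    /// Prepare the common spaces and the compressor space for a
    /// collection. The `true` passed to `CommonPlan::prepare` (and to
    /// `release` below) marks this as a full-heap collection; the
    /// Compressor always collects the whole heap.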
    fn prepare(&mut self, tls: VMWorkerThread) {
        self.common.prepare(tls, true);
        self.compressor_space.prepare();
    }

    fn release(&mut self, tls: VMWorkerThread) {
        self.common.release(tls, true);
        self.compressor_space.release();
    }

    fn end_of_gc(&mut self, tls: VMWorkerThread) {
        self.common.end_of_gc(tls);
    }

    fn get_allocator_mapping(&self) -> &'static EnumMap<AllocationSemantics, AllocatorSelector> {
        &ALLOCATOR_MAPPING
    }
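
    /// Schedule the work packets for a collection. The buckets run in
    /// order: stop the mutators and mark the transitive closure, compute
    /// the offset vectors (`CalculateForwarding`), re-scan the roots and
    /// forward references (`SecondRoots`), slide objects (`Compact`), and
    /// finally release. Reference processing, finalization, and VM
    /// weak-reference handling each get a pass over the marking trace and
    /// a pass over the forwarding trace.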
    fn schedule_collection(&'static self, scheduler: &GCWorkScheduler<VM>) {
        // Stop all mutators and scan their roots, kicking off the marking
        // transitive closure.
        scheduler.work_buckets[WorkBucketStage::Unconstrained]
            .add(StopMutators::<CompressorWorkContext<VM>>::new());
        // Prepare the plan, the collectors, and the mutators.
        scheduler.work_buckets[WorkBucketStage::Prepare]
            .add(Prepare::<CompressorWorkContext<VM>>::new(self));
        // After marking, compute the offset vectors that map each live
        // object to its post-compaction address.
        scheduler.work_buckets[WorkBucketStage::CalculateForwarding].add(GenerateWork::new(
            &self.compressor_space,
            CompressorSpace::<VM>::add_offset_vector_tasks,
        ));
        // Re-scan the roots and forward references to their new targets.
        scheduler.work_buckets[WorkBucketStage::SecondRoots].add(UpdateReferences::<VM>::new());
        // Slide the live objects to their new addresses.
        scheduler.work_buckets[WorkBucketStage::Compact].add(GenerateWork::new(
            &self.compressor_space,
            CompressorSpace::<VM>::add_compact_tasks,
        ));
        // The sentinel runs once every compaction packet has finished.
        scheduler.work_buckets[WorkBucketStage::Compact].set_sentinel(Box::new(
            AfterCompact::<VM>::new(&self.compressor_space, &self.common.los),
        ));
        // Release the plan, the collectors, and the mutators.
        scheduler.work_buckets[WorkBucketStage::Release]
            .add(Release::<CompressorWorkContext<VM>>::new(self));
        // Reference processing: soft/weak/phantom references are traced
        // with the marking trace and forwarded with the forwarding trace.
        if !*self.base().options.no_reference_types {
            use crate::util::reference_processor::{
                PhantomRefProcessing, SoftRefProcessing, WeakRefProcessing,
            };
            scheduler.work_buckets[WorkBucketStage::SoftRefClosure]
                .add(SoftRefProcessing::<MarkingProcessEdges<VM>>::new());
            scheduler.work_buckets[WorkBucketStage::WeakRefClosure]
                .add(WeakRefProcessing::<VM>::new());
            scheduler.work_buckets[WorkBucketStage::PhantomRefClosure]
                .add(PhantomRefProcessing::<VM>::new());

            use crate::util::reference_processor::RefForwarding;
            scheduler.work_buckets[WorkBucketStage::RefForwarding]
                .add(RefForwarding::<ForwardingProcessEdges<VM>>::new());

            use crate::util::reference_processor::RefEnqueue;
            scheduler.work_buckets[WorkBucketStage::Release].add(RefEnqueue::<VM>::new());
        }
        // Finalization, if enabled, follows the same two-trace pattern.
        if !*self.base().options.no_finalizer {
            use crate::util::finalizable_processor::{Finalization, ForwardFinalization};
            scheduler.work_buckets[WorkBucketStage::FinalRefClosure]
                .add(Finalization::<MarkingProcessEdges<VM>>::new());
            scheduler.work_buckets[WorkBucketStage::FinalizableForwarding]
                .add(ForwardFinalization::<ForwardingProcessEdges<VM>>::new());
        }
        // VM-specific weak reference processing and forwarding.
        scheduler.work_buckets[WorkBucketStage::VMRefClosure]
            .set_sentinel(Box::new(VMProcessWeakRefs::<MarkingProcessEdges<VM>>::new()));
        scheduler.work_buckets[WorkBucketStage::VMRefForwarding]
            .add(VMForwardWeakRefs::<ForwardingProcessEdges<VM>>::new());
        scheduler.work_buckets[WorkBucketStage::Release].add(VMPostForwarding::<VM>::default());

        // Analysis hook, if the feature is enabled.
        #[cfg(feature = "analysis")]
        {
            use crate::util::analysis::GcHookWork;
            scheduler.work_buckets[WorkBucketStage::Unconstrained].add(GcHookWork);
        }
        // Schedule a sanity GC after this collection, if the feature is enabled.
        #[cfg(feature = "sanity")]
        scheduler.work_buckets[WorkBucketStage::Final]
            .add(crate::util::sanity::sanity_checker::ScheduleSanityGC::<Self>::new(self));
    }

    fn current_gc_may_move_object(&self) -> bool {
        // Every Compressor collection compacts, so objects may move.
        true
    }

    fn get_used_pages(&self) -> usize {
        self.compressor_space.reserved_pages() + self.common.get_used_pages()
    }
}

impl<VM: VMBinding> Compressor<VM> {
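    /// Create a Compressor plan from the arguments supplied by the
    /// runtime. The compressor space is requested as a discontiguous
    /// space; all other spaces are created by [`CommonPlan`].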
    pub fn new(args: CreateGeneralPlanArgs<VM>) -> Self {
        let mut plan_args = CreateSpecificPlanArgs {
            global_args: args,
            constraints: &COMPRESSOR_CONSTRAINTS,
            // The Compressor needs no extra global side metadata specs.
            global_side_metadata_specs: SideMetadataContext::new_global_specs(&[]),
        };
        let res = Compressor {
            compressor_space: CompressorSpace::new(plan_args.get_normal_space_args(
                "compressor_space",
                true,
                false,
                VMRequest::discontiguous(),
            )),
            common: CommonPlan::new(plan_args),
        };
        // Check that the side metadata of all spaces is consistent.
        res.verify_side_metadata_sanity();
        res
    }
}