use atomic::Atomic;
use std::sync::atomic::Ordering;
use std::sync::Arc;

use crate::plan::{ObjectQueue, VectorObjectQueue};
use crate::policy::sft::GCWorkerMutRef;
use crate::policy::sft::SFT;
use crate::policy::space::{CommonSpace, Space};
use crate::scheduler::GCWorker;
use crate::util::address::Address;
use crate::util::conversions;
use crate::util::copy::CopySemantics;
use crate::util::heap::gc_trigger::GCTrigger;
use crate::util::heap::layout::vm_layout::vm_layout;
use crate::util::heap::PageResource;
use crate::util::heap::VMRequest;
use crate::util::memory::MmapAnnotation;
use crate::util::memory::MmapStrategy;
use crate::util::metadata::side_metadata::SideMetadataContext;
use crate::util::metadata::side_metadata::SideMetadataSanity;
use crate::util::object_enum::ObjectEnumerator;
use crate::util::opaque_pointer::*;
use crate::util::ObjectReference;
use crate::vm::VMBinding;
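/// An immortal space with a lock-free bump-pointer allocator. Objects
/// allocated here are never collected, moved, or freed individually; the
/// space maps its entire (fixed-size) extent eagerly at creation, so it only
/// supports a fixed heap size and is intended for NoGC-style plans.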
pub struct LockFreeImmortalSpace<VM: VMBinding> {
    #[allow(unused)]
    name: &'static str,
    /// Allocation cursor: the next free address in the space.
    cursor: Atomic<Address>,
    /// The upper bound of the space (exclusive).
    limit: Address,
    /// The lower bound of the space (inclusive).
    start: Address,
    /// The size of the space in bytes, aligned up to whole chunks.
    total_bytes: usize,
    /// If true, zero memory in `acquire` (the allocation slow path).
    slow_path_zeroing: bool,
    metadata: SideMetadataContext,
    gc_trigger: Arc<GCTrigger<VM>>,
}
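// SFT methods that only matter for spaces whose objects can die or move are
// deliberately left unimplemented: nothing in this space is ever collected.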
impl<VM: VMBinding> SFT for LockFreeImmortalSpace<VM> {
fn name(&self) -> &'static str {
self.get_name()
}
fn is_live(&self, _object: ObjectReference) -> bool {
unimplemented!()
}
    #[cfg(feature = "object_pinning")]
    fn pin_object(&self, _object: ObjectReference) -> bool {
        // Pinning is a no-op: objects in this space can never move.
        false
    }
    #[cfg(feature = "object_pinning")]
    fn unpin_object(&self, _object: ObjectReference) -> bool {
        false
    }
    #[cfg(feature = "object_pinning")]
    fn is_object_pinned(&self, _object: ObjectReference) -> bool {
        // Every object is effectively pinned, since nothing here ever moves.
        true
    }
fn is_movable(&self) -> bool {
unimplemented!()
}
#[cfg(feature = "sanity")]
fn is_sane(&self) -> bool {
unimplemented!()
}
fn initialize_object_metadata(&self, _object: ObjectReference, _alloc: bool) {
#[cfg(feature = "vo_bit")]
crate::util::metadata::vo_bit::set_vo_bit(_object);
}
#[cfg(feature = "is_mmtk_object")]
fn is_mmtk_object(&self, addr: Address) -> Option<ObjectReference> {
crate::util::metadata::vo_bit::is_vo_bit_set_for_addr(addr)
}
#[cfg(feature = "is_mmtk_object")]
fn find_object_from_internal_pointer(
&self,
ptr: Address,
max_search_bytes: usize,
) -> Option<ObjectReference> {
crate::util::metadata::vo_bit::find_object_from_internal_pointer::<VM>(
ptr,
max_search_bytes,
)
}
fn sft_trace_object(
&self,
_queue: &mut VectorObjectQueue,
_object: ObjectReference,
_worker: GCWorkerMutRef,
) -> ObjectReference {
unreachable!()
}
}
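// This space manages its memory directly (it maps its whole extent in `new`),
// so it has no page resource and no `CommonSpace`.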
impl<VM: VMBinding> Space<VM> for LockFreeImmortalSpace<VM> {
fn as_space(&self) -> &dyn Space<VM> {
self
}
fn as_sft(&self) -> &(dyn SFT + Sync + 'static) {
self
}
fn get_page_resource(&self) -> &dyn PageResource<VM> {
unimplemented!()
}
fn maybe_get_page_resource_mut(&mut self) -> Option<&mut dyn PageResource<VM>> {
None
}
fn common(&self) -> &CommonSpace<VM> {
unimplemented!()
}
fn get_gc_trigger(&self) -> &GCTrigger<VM> {
&self.gc_trigger
}
    fn release_multiple_pages(&mut self, _start: Address) {
        panic!("immortal space only releases pages en masse")
    }
    fn initialize_sft(&self, sft_map: &mut dyn crate::policy::sft_map::SFTMap) {
        // Safety: the whole extent [start, start + total_bytes) was reserved in
        // `new`, so the SFT entries for that range can be initialized eagerly.
        unsafe { sft_map.eager_initialize(self.as_sft(), self.start, self.total_bytes) };
    }
    fn reserved_pages(&self) -> usize {
        let cursor = self.cursor.load(Ordering::Relaxed);
        // Pages are reserved up to the current allocation cursor, i.e. the
        // bytes handed out so far, not the bytes remaining.
        let data_pages = conversions::bytes_to_pages_up(cursor - self.start);
        let meta_pages = self.metadata.calculate_reserved_pages(data_pages);
        data_pages + meta_pages
    }
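    /// Allocate `pages` pages by atomically bumping the cursor. Multiple
    /// mutators may race here; `fetch_update` retries on contention, so each
    /// caller receives a disjoint range without taking a lock. An illustrative
    /// standalone sketch of this pattern is in the test module at the bottom
    /// of this file.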
    fn acquire(&self, _tls: VMThread, pages: usize) -> Address {
        trace!("LockFreeImmortalSpace::acquire");
        let bytes = conversions::pages_to_bytes(pages);
        let start = self
            .cursor
            .fetch_update(Ordering::Relaxed, Ordering::Relaxed, |addr| {
                Some(addr.add(bytes))
            })
            .expect("update cursor failed");
        // The bump is unconditional, so the cursor can move past `limit`; any
        // allocation whose range crosses the limit fails here instead.
        if start + bytes > self.limit {
            panic!("OutOfMemory")
        }
        if self.slow_path_zeroing {
            crate::util::memory::zero(start, bytes);
        }
        start
    }
fn get_name(&self) -> &'static str {
"LockFreeImmortalSpace"
}
fn verify_side_metadata_sanity(&self, side_metadata_sanity_checker: &mut SideMetadataSanity) {
side_metadata_sanity_checker
.verify_metadata_context(std::any::type_name::<Self>(), &self.metadata)
}
fn enumerate_objects(&self, enumerator: &mut dyn ObjectEnumerator) {
enumerator.visit_address_range(self.start, self.start + self.total_bytes);
}
}
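// Tracing is a no-op for this space: nothing is ever marked, copied, or
// reclaimed, so reaching these methods indicates a plan misconfiguration.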
impl<VM: VMBinding> crate::policy::gc_work::PolicyTraceObject<VM> for LockFreeImmortalSpace<VM> {
fn trace_object<Q: ObjectQueue, const KIND: crate::policy::gc_work::TraceKind>(
&self,
_queue: &mut Q,
_object: ObjectReference,
_copy: Option<CopySemantics>,
_worker: &mut GCWorker<VM>,
) -> ObjectReference {
unreachable!()
}
fn may_move_objects<const KIND: crate::policy::gc_work::TraceKind>() -> bool {
unreachable!()
}
}
impl<VM: VMBinding> LockFreeImmortalSpace<VM> {
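    /// Create the space. Only a fixed heap size (`GCTriggerSelector::FixedHeapSize`)
    /// is supported; the whole chunk-aligned extent is reserved and mmapped
    /// eagerly here, so no page resource is needed later.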
    #[allow(dead_code)]
    pub fn new(args: crate::policy::space::PlanCreateSpaceArgs<VM>) -> Self {
let slow_path_zeroing = args.zeroed;
let total_bytes = match *args.options.gc_trigger {
crate::util::options::GCTriggerSelector::FixedHeapSize(bytes) => bytes,
_ => unimplemented!(),
};
        assert!(
            total_bytes <= vm_layout().available_bytes(),
            "Requested heap size ({} bytes) exceeds the available address space ({} bytes).",
            total_bytes,
            vm_layout().available_bytes()
        );
        // Align the requested size up to a whole number of chunks.
        let aligned_total_bytes = crate::util::conversions::raw_align_up(
            total_bytes,
            crate::util::heap::layout::vm_layout::BYTES_IN_CHUNK,
        );
let vmrequest = VMRequest::fixed_size(aligned_total_bytes);
let VMRequest::Extent { extent, top } = vmrequest else {
unreachable!()
};
let start = args.heap.reserve(extent, top);
let space = Self {
name: args.name,
cursor: Atomic::new(start),
limit: start + aligned_total_bytes,
start,
total_bytes: aligned_total_bytes,
slow_path_zeroing,
metadata: SideMetadataContext {
global: args.global_side_metadata_specs,
local: vec![],
},
gc_trigger: args.gc_trigger,
};
        // Eagerly map the whole space up front; failing to mmap here is fatal.
        let strategy = MmapStrategy::new(
            *args.options.transparent_hugepages,
            crate::util::memory::MmapProtection::ReadWrite,
        );
        crate::util::memory::dzmmap_noreplace(
            start,
            aligned_total_bytes,
            strategy,
            &MmapAnnotation::Space {
                name: space.get_name(),
            },
        )
        .unwrap();
        // Map the side metadata that covers the whole space.
        space
            .metadata
            .try_map_metadata_space(start, aligned_total_bytes, space.get_name())
            .unwrap_or_else(|e| {
                panic!("failed to mmap meta memory: {e}")
            });
space
}
}
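
// The sketch below is illustrative only and does not touch the space above:
// it reproduces the lock-free bump-pointer pattern from `acquire` on a plain
// `AtomicUsize`, so the claim that racing bumps hand out disjoint ranges can
// be exercised without constructing a `LockFreeImmortalSpace`.
#[cfg(test)]
mod bump_cursor_sketch {
    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::Mutex;

    /// Bump `cursor` by `bytes` and return the old value, mirroring how
    /// `acquire` uses `Atomic<Address>::fetch_update`.
    fn bump(cursor: &AtomicUsize, bytes: usize) -> usize {
        cursor
            .fetch_update(Ordering::Relaxed, Ordering::Relaxed, |addr| {
                Some(addr + bytes)
            })
            .expect("update cursor failed")
    }

    #[test]
    fn concurrent_bumps_are_disjoint() {
        let cursor = AtomicUsize::new(0);
        let starts = Mutex::new(Vec::new());
        std::thread::scope(|s| {
            for _ in 0..4 {
                s.spawn(|| {
                    for _ in 0..1000 {
                        let start = bump(&cursor, 16);
                        starts.lock().unwrap().push(start);
                    }
                });
            }
        });
        // Each of the 4000 bumps received its own 16-byte range: the sorted
        // start offsets are exactly 0, 16, 32, ..., with no duplicates.
        let mut starts = starts.into_inner().unwrap();
        starts.sort_unstable();
        assert_eq!(starts, (0..4000usize).map(|i| i * 16).collect::<Vec<_>>());
    }
}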