Struct mmtk::policy::immix::immixspace::ImmixSpace
source · pub struct ImmixSpace<VM: VMBinding> {
common: CommonSpace<VM>,
pr: BlockPageResource<VM, Block>,
pub chunk_map: ChunkMap,
pub line_mark_state: AtomicU8,
line_unavail_state: AtomicU8,
pub reusable_blocks: ReusableBlockPool,
pub(super) defrag: Defrag,
lines_consumed: AtomicUsize,
mark_state: u8,
scheduler: Arc<GCWorkScheduler<VM>>,
space_args: ImmixSpaceArgs,
}
Fields§
§common: CommonSpace<VM>
§pr: BlockPageResource<VM, Block>
§chunk_map: ChunkMap
Allocation status for all chunks in immix space
line_mark_state: AtomicU8
Current line mark state
§line_unavail_state: AtomicU8
Line mark state in previous GC
reusable_blocks: ReusableBlockPool
A list of all reusable blocks
defrag: Defrag
Defrag utilities
lines_consumed: AtomicUsize
How many lines have been consumed since last GC?
mark_state: u8
Object mark state
scheduler: Arc<GCWorkScheduler<VM>>
Work packet scheduler
space_args: ImmixSpaceArgs
Some settings for this space
Implementations§
source§impl<VM: VMBinding> ImmixSpace<VM>
impl<VM: VMBinding> ImmixSpace<VM>
const UNMARKED_STATE: u8 = 0u8
const MARKED_STATE: u8 = 1u8
sourcefn side_metadata_specs() -> Vec<SideMetadataSpec>
fn side_metadata_specs() -> Vec<SideMetadataSpec>
Get side metadata specs
pub fn new( args: PlanCreateSpaceArgs<'_, VM>, space_args: ImmixSpaceArgs ) -> Self
sourcepub fn flush_page_resource(&self)
pub fn flush_page_resource(&self)
Flush the thread-local queues in BlockPageResource
sourcepub fn defrag_headroom_pages(&self) -> usize
pub fn defrag_headroom_pages(&self) -> usize
Get the number of defrag headroom pages.
sourcepub fn decide_whether_to_defrag(
&self,
emergency_collection: bool,
collect_whole_heap: bool,
collection_attempts: usize,
user_triggered_collection: bool,
full_heap_system_gc: bool
) -> bool
pub fn decide_whether_to_defrag( &self, emergency_collection: bool, collect_whole_heap: bool, collection_attempts: usize, user_triggered_collection: bool, full_heap_system_gc: bool ) -> bool
Check if the current GC should do defragmentation.
sourcefn scheduler(&self) -> &GCWorkScheduler<VM>
fn scheduler(&self) -> &GCWorkScheduler<VM>
Get work packet scheduler
pub fn prepare(&mut self, major_gc: bool, plan_stats: StatsForDefrag)
sourcepub fn end_of_gc(&mut self) -> bool
pub fn end_of_gc(&mut self) -> bool
This is called when a GC finished. Return whether this GC was a defrag GC, as a plan may want to know this.
sourcefn generate_sweep_tasks(&self) -> Vec<Box<dyn GCWork<VM>>>
fn generate_sweep_tasks(&self) -> Vec<Box<dyn GCWork<VM>>>
Generate chunk sweep tasks
sourcepub fn release_block(&self, block: Block)
pub fn release_block(&self, block: Block)
Release a block.
sourcepub fn get_clean_block(&self, tls: VMThread, copy: bool) -> Option<Block>
pub fn get_clean_block(&self, tls: VMThread, copy: bool) -> Option<Block>
Allocate a clean block.
sourcepub fn get_reusable_block(&self, copy: bool) -> Option<Block>
pub fn get_reusable_block(&self, copy: bool) -> Option<Block>
Pop a reusable block from the reusable block list.
sourcepub fn trace_object_without_moving(
&self,
queue: &mut impl ObjectQueue,
object: ObjectReference
) -> ObjectReference
pub fn trace_object_without_moving( &self, queue: &mut impl ObjectQueue, object: ObjectReference ) -> ObjectReference
Trace and mark objects without evacuation.
sourcepub fn trace_object_with_opportunistic_copy(
&self,
queue: &mut impl ObjectQueue,
object: ObjectReference,
semantics: CopySemantics,
worker: &mut GCWorker<VM>,
nursery_collection: bool
) -> ObjectReference
pub fn trace_object_with_opportunistic_copy( &self, queue: &mut impl ObjectQueue, object: ObjectReference, semantics: CopySemantics, worker: &mut GCWorker<VM>, nursery_collection: bool ) -> ObjectReference
Trace object and do evacuation if required.
fn unlog_object_if_needed(&self, object: ObjectReference)
sourcepub fn mark_lines(&self, object: ObjectReference)
pub fn mark_lines(&self, object: ObjectReference)
Mark all the lines that the given object spans.
sourcefn attempt_mark(&self, object: ObjectReference, mark_state: u8) -> bool
fn attempt_mark(&self, object: ObjectReference, mark_state: u8) -> bool
Atomically mark an object.
sourcefn is_marked_with(&self, object: ObjectReference, mark_state: u8) -> bool
fn is_marked_with(&self, object: ObjectReference, mark_state: u8) -> bool
Check if an object is marked.
pub(crate) fn is_marked(&self, object: ObjectReference) -> bool
sourcefn is_pinned(&self, _object: ObjectReference) -> bool
fn is_pinned(&self, _object: ObjectReference) -> bool
Check if an object is pinned.
sourcepub fn get_next_available_lines(
&self,
search_start: Line
) -> Option<(Line, Line)>
pub fn get_next_available_lines( &self, search_start: Line ) -> Option<(Line, Line)>
Hole searching.
Linearly scan lines in a block to search for the next hole, starting from the given line. If we find available lines, return a tuple of the start line and the end line (non-inclusive).
Returns None if the search could not find any more holes.
pub fn is_last_gc_exhaustive(did_defrag_for_last_gc: bool) -> bool
pub(crate) fn get_pages_allocated(&self) -> usize
sourcefn post_copy(&self, object: ObjectReference, _bytes: usize)
fn post_copy(&self, object: ObjectReference, _bytes: usize)
Post copy routine for Immix copy contexts
Trait Implementations§
source§impl<VM: VMBinding> PolicyTraceObject<VM> for ImmixSpace<VM>
impl<VM: VMBinding> PolicyTraceObject<VM> for ImmixSpace<VM>
source§fn trace_object<Q: ObjectQueue, const KIND: u8>(
&self,
queue: &mut Q,
object: ObjectReference,
copy: Option<CopySemantics>,
worker: &mut GCWorker<VM>
) -> ObjectReference
fn trace_object<Q: ObjectQueue, const KIND: u8>( &self, queue: &mut Q, object: ObjectReference, copy: Option<CopySemantics>, worker: &mut GCWorker<VM> ) -> ObjectReference
Trace the object; requires copy to be a Some value if the trace may move objects.
source§fn post_scan_object(&self, object: ObjectReference)
fn post_scan_object(&self, object: ObjectReference)
source§fn may_move_objects<const KIND: u8>() -> bool
fn may_move_objects<const KIND: u8>() -> bool
source§impl<VM: VMBinding> SFT for ImmixSpace<VM>
impl<VM: VMBinding> SFT for ImmixSpace<VM>
source§fn get_forwarded_object(
&self,
object: ObjectReference
) -> Option<ObjectReference>
fn get_forwarded_object( &self, object: ObjectReference ) -> Option<ObjectReference>
source§fn is_live(&self, object: ObjectReference) -> bool
fn is_live(&self, object: ObjectReference) -> bool
fn pin_object(&self, object: ObjectReference) -> bool
fn unpin_object(&self, object: ObjectReference) -> bool
fn is_object_pinned(&self, object: ObjectReference) -> bool
source§fn is_movable(&self) -> bool
fn is_movable(&self) -> bool
source§fn is_sane(&self) -> bool
fn is_sane(&self) -> bool
source§fn initialize_object_metadata(&self, _object: ObjectReference, _alloc: bool)
fn initialize_object_metadata(&self, _object: ObjectReference, _alloc: bool)
source§fn is_mmtk_object(&self, addr: Address) -> Option<ObjectReference>
fn is_mmtk_object(&self, addr: Address) -> Option<ObjectReference>
Is addr a valid object reference to an object allocated in this space?
This default implementation works for all spaces that use MMTk’s mapper to allocate memory.
Some spaces, like MallocSpace
, use third-party libraries to allocate memory.
Such spaces need to override this method.
fn find_object_from_internal_pointer( &self, ptr: Address, max_search_bytes: usize ) -> Option<ObjectReference>
source§fn sft_trace_object(
&self,
_queue: &mut VectorObjectQueue,
_object: ObjectReference,
_worker: GCWorkerMutRef<'_>
) -> ObjectReference
fn sft_trace_object( &self, _queue: &mut VectorObjectQueue, _object: ObjectReference, _worker: GCWorkerMutRef<'_> ) -> ObjectReference
SFTProcessEdges
provides an easy way for most plans to trace objects without the need to implement any plan-specific
code. However, tracing objects for some policies are more complicated, and they do not provide an
implementation of this method. For example, mark compact space requires trace twice in each GC.
Immix has defrag trace and fast trace.source§fn is_reachable(&self, object: ObjectReference) -> bool
fn is_reachable(&self, object: ObjectReference) -> bool
Some objects may have is_live = true but are actually unreachable.
source§fn is_in_space(&self, _object: ObjectReference) -> bool
fn is_in_space(&self, _object: ObjectReference) -> bool
source§impl<VM: VMBinding> Space<VM> for ImmixSpace<VM>
impl<VM: VMBinding> Space<VM> for ImmixSpace<VM>
fn as_space(&self) -> &dyn Space<VM>
fn as_sft(&self) -> &(dyn SFT + Sync + 'static)
fn get_page_resource(&self) -> &dyn PageResource<VM>
source§fn maybe_get_page_resource_mut(&mut self) -> Option<&mut dyn PageResource<VM>>
fn maybe_get_page_resource_mut(&mut self) -> Option<&mut dyn PageResource<VM>>
Returns None if the space does not have a page resource.
fn common(&self) -> &CommonSpace<VM>
source§fn initialize_sft(&self, sft_map: &mut dyn SFTMap)
fn initialize_sft(&self, sft_map: &mut dyn SFTMap)
fn release_multiple_pages(&mut self, _start: Address)
source§fn set_copy_for_sft_trace(&mut self, _semantics: Option<CopySemantics>)
fn set_copy_for_sft_trace(&mut self, _semantics: Option<CopySemantics>)
source§fn enumerate_objects(&self, enumerator: &mut dyn ObjectEnumerator)
fn enumerate_objects(&self, enumerator: &mut dyn ObjectEnumerator)
source§fn will_oom_on_acquire(&self, tls: VMThread, size: usize) -> bool
fn will_oom_on_acquire(&self, tls: VMThread, size: usize) -> bool
If the requested size is unrealistically large (e.g. usize::MAX), it breaks the assumptions of our implementation of
page resource, vm map, etc. This check prevents that, and allows us to
handle the OOM case.
Each allocator that may request an arbitrary size should call this method before
acquiring memory from the space. For example, bump pointer allocator and large object
allocator need to call this method. On the other hand, allocators that only allocate
memory in fixed size blocks do not need to call this method.
An allocator should call this method before doing any computation on the size to
avoid arithmetic overflow. If we have to do computation in the allocation fastpath and
overflow happens there, there is nothing we can do about it.
Return a boolean to indicate if we will be out of memory, determined by the check.
fn acquire(&self, tls: VMThread, pages: usize) -> Address
fn address_in_space(&self, start: Address) -> bool
fn in_space(&self, object: ObjectReference) -> bool
source§fn grow_space(&self, start: Address, bytes: usize, new_chunk: bool)
fn grow_space(&self, start: Address, bytes: usize, new_chunk: bool)
source§fn ensure_mapped(&self)
fn ensure_mapped(&self)
fn reserved_pages(&self) -> usize
source§fn available_physical_pages(&self) -> usize
fn available_physical_pages(&self) -> usize
fn get_name(&self) -> &'static str
fn get_gc_trigger(&self) -> &GCTrigger<VM>
source§fn verify_side_metadata_sanity(
&self,
side_metadata_sanity_checker: &mut SideMetadataSanity
)
fn verify_side_metadata_sanity( &self, side_metadata_sanity_checker: &mut SideMetadataSanity )
extreme_assertions
feature is active.
Internally this calls verify_metadata_context() from util::metadata::sanity
Read more
impl<VM: VMBinding> Sync for ImmixSpace<VM>
Auto Trait Implementations§
impl<VM> !RefUnwindSafe for ImmixSpace<VM>
impl<VM> Send for ImmixSpace<VM>
impl<VM> Unpin for ImmixSpace<VM>where
VM: Unpin,
impl<VM> !UnwindSafe for ImmixSpace<VM>
Blanket Implementations§
source§impl<T> BorrowMut<T> for Twhere
T: ?Sized,
impl<T> BorrowMut<T> for Twhere
T: ?Sized,
source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
§impl<T> Downcast for Twhere
T: Any,
impl<T> Downcast for Twhere
T: Any,
§fn into_any(self: Box<T>) -> Box<dyn Any>
fn into_any(self: Box<T>) -> Box<dyn Any>
Box<dyn Trait>
(where Trait: Downcast
) to Box<dyn Any>
. Box<dyn Any>
can
then be further downcast
into Box<ConcreteType>
where ConcreteType
implements Trait
.§fn into_any_rc(self: Rc<T>) -> Rc<dyn Any>
fn into_any_rc(self: Rc<T>) -> Rc<dyn Any>
Rc<Trait>
(where Trait: Downcast
) to Rc<Any>
. Rc<Any>
can then be
further downcast
into Rc<ConcreteType>
where ConcreteType
implements Trait
.§fn as_any(&self) -> &(dyn Any + 'static)
fn as_any(&self) -> &(dyn Any + 'static)
&Trait
(where Trait: Downcast
) to &Any
. This is needed since Rust cannot
generate &Any
’s vtable from &Trait
’s.§fn as_any_mut(&mut self) -> &mut (dyn Any + 'static)
fn as_any_mut(&mut self) -> &mut (dyn Any + 'static)
&mut Trait
(where Trait: Downcast
) to &Any
. This is needed since Rust cannot
generate &mut Any
’s vtable from &mut Trait
’s.§impl<T> DowncastSync for T
impl<T> DowncastSync for T
source§impl<T> IntoEither for T
impl<T> IntoEither for T
source§fn into_either(self, into_left: bool) -> Either<Self, Self>
fn into_either(self, into_left: bool) -> Either<Self, Self>
self
into a Left
variant of Either<Self, Self>
if into_left
is true
.
Converts self
into a Right
variant of Either<Self, Self>
otherwise. Read moresource§fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
self
into a Left
variant of Either<Self, Self>
if into_left(&self)
returns true
.
Converts self
into a Right
variant of Either<Self, Self>
otherwise. Read more