pub struct MallocSpace<VM: VMBinding> {
    phantom: PhantomData<VM>,
    active_bytes: AtomicUsize,
    active_pages: AtomicUsize,
    pub chunk_addr_min: Atomic<Address>,
    pub chunk_addr_max: Atomic<Address>,
    metadata: SideMetadataContext,
    scheduler: Arc<GCWorkScheduler<VM>>,
    gc_trigger: Arc<GCTrigger<VM>>,
    active_mem: Mutex<HashMap<Address, usize>>,
    pub total_work_packets: AtomicU32,
    pub completed_work_packets: AtomicU32,
    pub work_live_bytes: AtomicUsize,
}

This space uses malloc to get new memory, and performs mark-sweep collection for that memory.
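
As an illustration of the overall approach only (not the actual MallocSpace implementation), the standalone sketch below keeps a side table of allocated blocks, marks the blocks reached during tracing, and frees unmarked blocks during the sweep. Every name in it (ToyMallocSpace and its methods) is hypothetical, and it uses the Rust system allocator in place of raw libc malloc/free.

use std::alloc::{alloc, dealloc, Layout};
use std::collections::HashMap;

/// Hypothetical, simplified analogue of a malloc-backed mark-sweep space.
struct ToyMallocSpace {
    /// Side table: block address -> (layout, mark bit).
    blocks: HashMap<usize, (Layout, bool)>,
}

impl ToyMallocSpace {
    fn new() -> Self {
        Self { blocks: HashMap::new() }
    }

    /// Allocate a block from the system allocator and record it in the side table.
    fn alloc(&mut self, size: usize, align: usize) -> *mut u8 {
        let layout = Layout::from_size_align(size, align).unwrap();
        let ptr = unsafe { alloc(layout) };
        self.blocks.insert(ptr as usize, (layout, false));
        ptr
    }

    /// "Trace" an object: set its mark bit so the sweep keeps it.
    fn trace(&mut self, ptr: *mut u8) {
        if let Some(entry) = self.blocks.get_mut(&(ptr as usize)) {
            entry.1 = true;
        }
    }

    /// Sweep: free unmarked blocks, clear mark bits on survivors.
    fn sweep(&mut self) {
        self.blocks.retain(|&addr, (layout, marked)| {
            if *marked {
                *marked = false; // survivor: reset for the next GC cycle
                true
            } else {
                unsafe { dealloc(addr as *mut u8, *layout) };
                false
            }
        });
    }
}

fn main() {
    let mut space = ToyMallocSpace::new();
    let live = space.alloc(64, 8);
    let _dead = space.alloc(64, 8);
    space.trace(live);  // only `live` is reachable
    space.sweep();      // `_dead` is freed here
    space.sweep();      // `live` was not re-marked, so it is freed now
    assert!(space.blocks.is_empty());
}

In the real MallocSpace the side table is MMTk's side metadata, and the mark bit may instead live in the object header (see the two sweep variants below), but the mark-then-sweep lifecycle is the same.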

Fields

phantom: PhantomData<VM>
active_bytes: AtomicUsize
active_pages: AtomicUsize
chunk_addr_min: Atomic<Address>
chunk_addr_max: Atomic<Address>
metadata: SideMetadataContext
scheduler: Arc<GCWorkScheduler<VM>>
    Work packet scheduler
gc_trigger: Arc<GCTrigger<VM>>
active_mem: Mutex<HashMap<Address, usize>>
total_work_packets: AtomicU32
completed_work_packets: AtomicU32
work_live_bytes: AtomicUsize

Implementations

impl<VM: VMBinding> MallocSpace<VM>

pub fn extend_global_side_metadata_specs(specs: &mut Vec<SideMetadataSpec>)

pub fn new(args: PlanCreateSpaceArgs<'_, VM>) -> Self

fn set_page_mark(&self, start: Address, size: usize)

Set the page mark for multiple pages, starting from the given address and covering the given size, and increase the active page count if we set any page mark in the region. This method is thread-safe and can be used during the mutator phase, when mutators may access the same page. Performance-wise, it may impose overhead, as we do a compare-exchange for every page in the range.
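
The per-page compare-exchange described above can be pictured with this standalone sketch (the PageMarks type and its layout are hypothetical, not MMTk's side-metadata representation): each page gets one atomic byte, and only the thread that wins the 0-to-1 transition increments the active page counter, so concurrent mutators never double-count a page.

use std::sync::atomic::{AtomicU8, AtomicUsize, Ordering};

const PAGE_SIZE: usize = 4096;

/// Hypothetical page-mark table: one atomic byte per page.
struct PageMarks {
    marks: Vec<AtomicU8>,
    active_pages: AtomicUsize,
}

impl PageMarks {
    fn new(num_pages: usize) -> Self {
        Self {
            marks: (0..num_pages).map(|_| AtomicU8::new(0)).collect(),
            active_pages: AtomicUsize::new(0),
        }
    }

    /// Mark every page overlapping [start, start + size).
    /// Thread-safe: the compare-exchange guarantees each page is counted once.
    fn set_page_mark(&self, start: usize, size: usize) {
        let first = start / PAGE_SIZE;
        let last = (start + size + PAGE_SIZE - 1) / PAGE_SIZE;
        for page in first..last {
            // Only the thread that wins the 0 -> 1 transition bumps the counter.
            if self.marks[page]
                .compare_exchange(0, 1, Ordering::SeqCst, Ordering::SeqCst)
                .is_ok()
            {
                self.active_pages.fetch_add(1, Ordering::SeqCst);
            }
        }
    }
}

fn main() {
    let marks = PageMarks::new(16);
    marks.set_page_mark(0, 3 * PAGE_SIZE);
    marks.set_page_mark(PAGE_SIZE, PAGE_SIZE); // overlapping call: no double counting
    assert_eq!(marks.active_pages.load(Ordering::SeqCst), 3);
}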

unsafe fn unset_page_mark(&self, start: Address, size: usize)

Unset the page mark for multiple pages, starting from the given address and covering the given size, and decrease the active page count if we unset any page mark in the region.

Safety

We need to ensure that only one GC thread is accessing the range.

pub fn alloc( &self, tls: VMThread, size: usize, align: usize, offset: usize ) -> Address

pub fn free(&self, addr: Address)

fn free_internal(&self, addr: Address, bytes: usize, offset_malloc_bit: bool)

pub fn trace_object<Q: ObjectQueue>( &self, queue: &mut Q, object: ObjectReference ) -> ObjectReference

fn map_metadata_and_update_bound(&self, addr: Address, size: usize)

pub fn prepare(&mut self)

pub fn release(&mut self)

pub fn end_of_gc(&mut self)

pub fn sweep_chunk(&self, chunk_start: Address)

fn get_malloc_addr_size(object: ObjectReference) -> (Address, bool, usize)

Given an object in MallocSpace, return its malloc address, whether it is an offset malloc, and its malloc size.

fn clean_up_empty_chunk(&self, chunk_start: Address)

Clean up for an empty chunk

fn sweep_object( &self, object: ObjectReference, empty_page_start: &mut Address ) -> bool

Sweep an object if it is dead, and unset page marks for empty pages before this object. Return true if the object is swept.
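
The "empty pages before this object" bookkeeping can be pictured with a hypothetical cursor-based sketch: while walking objects in address order, a cursor tracks the first page not yet known to hold a live object; when a live object is found, every whole page between the cursor and that object can have its page mark cleared. The function and constant names below are illustrative only.

const PAGE_SIZE: usize = 4096;

/// Walk object addresses in ascending order, deciding which pages between
/// live objects are empty. `live` says whether each object survives;
/// `clear_page_mark` stands in for unsetting the page mark on one page.
fn sweep_with_page_cursor(
    objects: &[(usize, usize)],        // (address, size), sorted by address
    live: impl Fn(usize) -> bool,
    mut clear_page_mark: impl FnMut(usize),
) {
    let mut empty_page_start = 0usize; // first page with no live object seen yet
    for &(addr, size) in objects {
        if live(addr) {
            // Pages strictly before this object's first page held no live object.
            let first_live_page = addr / PAGE_SIZE * PAGE_SIZE;
            let mut page = empty_page_start;
            while page < first_live_page {
                clear_page_mark(page);
                page += PAGE_SIZE;
            }
            // The next candidate empty page starts after this live object.
            empty_page_start = (addr + size + PAGE_SIZE - 1) / PAGE_SIZE * PAGE_SIZE;
        }
        // Dead objects would be freed here; they do not keep a page live.
    }
}

fn main() {
    let objects = [(0x0000, 32), (0x3000, 64)];
    let mut cleared = Vec::new();
    // Only the object at 0x3000 is live, so pages 0x0000, 0x1000, 0x2000 are empty.
    sweep_with_page_cursor(&objects, |addr| addr == 0x3000, |p| cleared.push(p));
    assert_eq!(cleared, vec![0x0000, 0x1000, 0x2000]);
}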

fn debug_sweep_chunk_done(&self, live_bytes_in_the_chunk: usize)

Called when the sweeping of each chunk is done. Only called in debug builds.

fn sweep_chunk_mark_on_side( &self, chunk_start: Address, mark_bit_spec: SideMetadataSpec )

This function is called when the mark bits sit in side metadata. It has been optimized with the use of bulk loading and bulk zeroing of metadata.

This function uses non-atomic accesses to side metadata (although these non-atomic accesses should not have race conditions associated with them) and also calls libc functions (malloc_usable_size(), free()).
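
A rough, standalone illustration of the bulk idea (the metadata layout here is hypothetical and much simpler than MMTk's actual side metadata): mark and allocation bits are packed into 64-bit words, so a whole word of objects can be tested at once, words with no dead objects are dismissed with a single comparison, and the mark bits are zeroed in bulk at the end.

/// Hypothetical per-chunk side metadata: one "allocated" bit and one "mark"
/// bit per granule, stored as packed 64-bit words.
struct ChunkBits {
    alloc_bits: Vec<u64>,
    mark_bits: Vec<u64>,
}

impl ChunkBits {
    /// Sweep with bulk loads: whole words of metadata are combined at once.
    /// `free_granule` stands in for the real free()/malloc_usable_size() calls.
    fn sweep_bulk(&mut self, mut free_granule: impl FnMut(usize)) {
        for (w, (alloc, mark)) in self
            .alloc_bits
            .iter_mut()
            .zip(self.mark_bits.iter())
            .enumerate()
        {
            let dead = *alloc & !*mark; // allocated but not marked
            if dead != 0 {
                for bit in 0..64 {
                    if dead & (1u64 << bit) != 0 {
                        free_granule(w * 64 + bit);
                    }
                }
            }
            *alloc &= *mark; // only survivors stay allocated
        }
        // Bulk-zero the mark bits for the next collection cycle.
        self.mark_bits.iter_mut().for_each(|w| *w = 0);
    }
}

fn main() {
    let mut bits = ChunkBits {
        alloc_bits: vec![0b1111],
        mark_bits: vec![0b0101],
    };
    let mut freed = Vec::new();
    bits.sweep_bulk(|g| freed.push(g));
    assert_eq!(freed, vec![1, 3]);          // granules 1 and 3 were unmarked
    assert_eq!(bits.alloc_bits[0], 0b0101); // survivors remain allocated
    assert_eq!(bits.mark_bits[0], 0);       // mark bits cleared in bulk
}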

fn sweep_chunk_mark_in_header(&self, chunk_start: Address)

This sweep function is called when the mark bit sits in the object header.

This function uses non-atomic accesses to side metadata (although these non-atomic accesses should not have race conditions associated with them) and also calls libc functions (malloc_usable_size(), free()).

fn sweep_each_object_in_chunk(&self, chunk_start: Address)

Trait Implementations

impl<VM: VMBinding> PolicyTraceObject<VM> for MallocSpace<VM>

fn trace_object<Q: ObjectQueue, const KIND: u8>( &self, queue: &mut Q, object: ObjectReference, _copy: Option<CopySemantics>, _worker: &mut GCWorker<VM> ) -> ObjectReference

Trace object in the policy. If the policy copies objects, we should expect copy to be a Some value.
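
For a non-moving policy such as this one, trace_object typically follows a mark-and-enqueue shape; the standalone sketch below (with hypothetical stand-ins for ObjectReference and the object queue) shows that shape: mark the object if it is not yet marked, enqueue it for scanning, and return it unchanged because nothing is copied.

use std::collections::HashSet;

/// Hypothetical stand-in for MMTk's ObjectReference.
type ObjRef = usize;

struct MarkState {
    marked: HashSet<ObjRef>,
}

impl MarkState {
    /// Typical non-moving trace_object: mark the object if it is not yet
    /// marked, enqueue it for scanning, and return it unchanged (no copying).
    fn trace_object(&mut self, queue: &mut Vec<ObjRef>, object: ObjRef) -> ObjRef {
        if self.marked.insert(object) {
            // Newly marked: its children still need to be scanned.
            queue.push(object);
        }
        object
    }
}

fn main() {
    let mut state = MarkState { marked: HashSet::new() };
    let mut queue = Vec::new();
    assert_eq!(state.trace_object(&mut queue, 0x1000), 0x1000);
    state.trace_object(&mut queue, 0x1000); // tracing the same object again is a no-op
    assert_eq!(queue.len(), 1);
}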

fn may_move_objects<const KIND: u8>() -> bool

Return whether the policy moves objects.

fn post_scan_object(&self, _object: ObjectReference)

Policy-specific post-scan-object hook. It is called after scanning each object in this space.

impl<VM: VMBinding> SFT for MallocSpace<VM>

fn is_mmtk_object(&self, addr: Address) -> Option<ObjectReference>

For malloc space, we just use the side metadata.

fn name(&self) -> &'static str

The space name

fn is_live(&self, object: ObjectReference) -> bool

Is the object live, determined by the policy?

fn pin_object(&self, _object: ObjectReference) -> bool

fn unpin_object(&self, _object: ObjectReference) -> bool

fn is_object_pinned(&self, _object: ObjectReference) -> bool

fn is_movable(&self) -> bool

Is the object movable, determined by the policy? E.g. the policy is non-moving, or the object is pinned.

fn is_sane(&self) -> bool
Is the object sane? A policy should return false if there is any abnormality about the object; the sanity checker will fail if an object is not sane.

fn is_in_space(&self, object: ObjectReference) -> bool

Is the object managed by MMTk? For most cases, if we find the sft for an object, that means the object is in the space and managed by MMTk. However, for some spaces, like MallocSpace, we mark the entire chunk in the SFT table as a malloc space, but only some of the addresses in the space contain actual MMTk objects. So they need a further check.
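
A standalone sketch of this two-level check (the chunk size, type names, and per-address set are all hypothetical): the SFT-style lookup is done at chunk granularity, and a positive hit is then confirmed against per-address metadata recording which addresses actually hold MMTk objects.

use std::collections::HashSet;

const CHUNK_SIZE: usize = 4 * 1024 * 1024; // illustrative 4 MB chunks

/// Hypothetical sketch of the two-level check described above: the SFT maps
/// whole chunks to a space, but within a malloc-backed chunk only addresses
/// with a valid-object entry actually hold MMTk objects.
struct ToySft {
    malloc_chunks: HashSet<usize>, // chunks registered to the malloc space
    valid_objects: HashSet<usize>, // per-address "this is an MMTk object" record
}

impl ToySft {
    fn is_in_malloc_space(&self, addr: usize) -> bool {
        // Step 1: chunk-granularity SFT lookup.
        let chunk = addr / CHUNK_SIZE;
        if !self.malloc_chunks.contains(&chunk) {
            return false;
        }
        // Step 2: the further check. Not every address in a malloc-space
        // chunk is an MMTk object, so consult the per-object metadata.
        self.valid_objects.contains(&addr)
    }
}

fn main() {
    let mut sft = ToySft { malloc_chunks: HashSet::new(), valid_objects: HashSet::new() };
    sft.malloc_chunks.insert(0);
    sft.valid_objects.insert(0x1000);
    assert!(sft.is_in_malloc_space(0x1000));      // registered object
    assert!(!sft.is_in_malloc_space(0x2000));     // same chunk, but no object here
    assert!(!sft.is_in_malloc_space(CHUNK_SIZE)); // chunk not owned by this space
}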

fn find_object_from_internal_pointer( &self, ptr: Address, max_search_bytes: usize ) -> Option<ObjectReference>

fn initialize_object_metadata(&self, object: ObjectReference, _alloc: bool)

Initialize object metadata (in the header, or in the side metadata).

fn sft_trace_object( &self, queue: &mut VectorObjectQueue, object: ObjectReference, _worker: GCWorkerMutRef<'_> ) -> ObjectReference

Trace objects through the SFT. This, along with SFTProcessEdges, provides an easy way for most plans to trace objects without the need to implement any plan-specific code. However, tracing objects for some policies is more complicated, and they do not provide an implementation of this method. For example, mark-compact space requires tracing twice in each GC. Immix has a defrag trace and a fast trace.

fn get_forwarded_object( &self, _object: ObjectReference ) -> Option<ObjectReference>

Get forwarding pointer if the object is forwarded.

fn is_reachable(&self, object: ObjectReference) -> bool

Is the object reachable, determined by the policy? Note: Objects in ImmortalSpace may have is_live = true but are actually unreachable.

impl<VM: VMBinding> Space<VM> for MallocSpace<VM>

fn as_space(&self) -> &dyn Space<VM>

fn as_sft(&self) -> &(dyn SFT + Sync + 'static)

fn get_page_resource(&self) -> &dyn PageResource<VM>

fn maybe_get_page_resource_mut(&mut self) -> Option<&mut dyn PageResource<VM>>

Get a mutable reference to the underlying page resource, or None if the space does not have a page resource.

fn common(&self) -> &CommonSpace<VM>

fn get_gc_trigger(&self) -> &GCTrigger<VM>

fn initialize_sft(&self, _sft_map: &mut dyn SFTMap)

Initialize entries in the SFT map for the space. This is called when the Space object has a non-moving address, as we will use the address to set the SFT. Currently, after we create a boxed plan, spaces in the plan have a non-moving address.

fn release_multiple_pages(&mut self, _start: Address)

fn in_space(&self, object: ObjectReference) -> bool

fn address_in_space(&self, _start: Address) -> bool

fn get_name(&self) -> &'static str

fn reserved_pages(&self) -> usize

fn verify_side_metadata_sanity( &self, side_metadata_sanity_checker: &mut SideMetadataSanity )

Ensure that the current space's metadata context does not have any issues. Panics with a suitable message if any issue is detected. It also initialises the sanity maps, which will then be used if the extreme_assertions feature is active. Internally this calls verify_metadata_context() from util::metadata::sanity.

fn enumerate_objects(&self, _enumerator: &mut dyn ObjectEnumerator)

Enumerate objects in the current space.

fn will_oom_on_acquire(&self, tls: VMThread, size: usize) -> bool

A check for the obvious out-of-memory case: if the requested size is larger than the heap size, it is definitely an OOM. We would like to identify that, and allow the binding to deal with the OOM. Without this check, we would attempt to allocate from the page resource. If the requested size is unrealistically large (such as usize::MAX), it breaks the assumptions of our implementation of the page resource, VM map, etc. This check prevents that, and allows us to handle the OOM case. Each allocator that may request an arbitrary size should call this method before acquiring memory from the space. For example, the bump pointer allocator and the large object allocator need to call this method. On the other hand, allocators that only allocate memory in fixed-size blocks do not need to call this method. An allocator should call this method before doing any computation on the size, to avoid arithmetic overflow. If we have to do computation in the allocation fastpath and overflow happens there, there is nothing we can do about it. Returns a boolean to indicate if we will be out of memory, determined by the check.
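
A minimal sketch of this kind of pre-allocation check, assuming a hypothetical heap-size parameter and page constant (this is not MMTk's actual API): the raw requested size is compared against the heap size before any rounding, so an absurd request such as usize::MAX is rejected as OOM instead of overflowing the later page arithmetic.

const BYTES_IN_PAGE: usize = 4096;

/// Hypothetical pre-allocation OOM check: compare the raw requested size
/// against the total heap size *before* doing any size arithmetic.
fn will_oom_on_acquire(heap_size_bytes: usize, requested_bytes: usize) -> bool {
    requested_bytes > heap_size_bytes
}

/// Hypothetical allocation slow path that uses the check.
fn acquire(heap_size_bytes: usize, requested_bytes: usize) -> Option<usize> {
    if will_oom_on_acquire(heap_size_bytes, requested_bytes) {
        // Report OOM to the binding; do not touch the page resource at all.
        return None;
    }
    // Only now is it safe to round up to pages: the size is known to be
    // bounded by the heap size, so this arithmetic cannot overflow.
    let pages = (requested_bytes + BYTES_IN_PAGE - 1) / BYTES_IN_PAGE;
    Some(pages)
}

fn main() {
    let heap = 64 * 1024 * 1024; // 64 MB heap
    assert_eq!(acquire(heap, 4096), Some(1));
    assert_eq!(acquire(heap, usize::MAX), None); // caught before any rounding
}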

fn acquire(&self, tls: VMThread, pages: usize) -> Address

fn grow_space(&self, start: Address, bytes: usize, new_chunk: bool)

This is called after we get a result from the page resources. The space may tap into the hook to monitor heap growth. The call is made from within the page resources' critical region, immediately before yielding the lock.

fn ensure_mapped(&self)

Ensure this space is marked as mapped – used when the space is already mapped (e.g. for a vm image which is externally mmapped.)

fn available_physical_pages(&self) -> usize

Return the number of physical pages available.

fn get_descriptor(&self) -> SpaceDescriptor

fn set_copy_for_sft_trace(&mut self, _semantics: Option<CopySemantics>)

What copy semantic we should use for this space if we copy objects from this space. This is only needed for plans that use SFTProcessEdges

Auto Trait Implementations

impl<VM> !RefUnwindSafe for MallocSpace<VM>

impl<VM> Send for MallocSpace<VM>

impl<VM> Sync for MallocSpace<VM>

impl<VM> Unpin for MallocSpace<VM>
where VM: Unpin,

impl<VM> !UnwindSafe for MallocSpace<VM>

Blanket Implementations

impl<T> Any for T
where T: 'static + ?Sized,

fn type_id(&self) -> TypeId

Gets the TypeId of self.

impl<T> Borrow<T> for T
where T: ?Sized,

fn borrow(&self) -> &T

Immutably borrows from an owned value.

impl<T> BorrowMut<T> for T
where T: ?Sized,

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value.
impl<T> Downcast for T
where T: Any,

fn into_any(self: Box<T>) -> Box<dyn Any>

Convert Box<dyn Trait> (where Trait: Downcast) to Box<dyn Any>. Box<dyn Any> can then be further downcast into Box<ConcreteType> where ConcreteType implements Trait.

fn into_any_rc(self: Rc<T>) -> Rc<dyn Any>

Convert Rc<Trait> (where Trait: Downcast) to Rc<Any>. Rc<Any> can then be further downcast into Rc<ConcreteType> where ConcreteType implements Trait.

fn as_any(&self) -> &(dyn Any + 'static)

Convert &Trait (where Trait: Downcast) to &Any. This is needed since Rust cannot generate &Any's vtable from &Trait's.

fn as_any_mut(&mut self) -> &mut (dyn Any + 'static)

Convert &mut Trait (where Trait: Downcast) to &Any. This is needed since Rust cannot generate &mut Any's vtable from &mut Trait's.

impl<T> DowncastSync for T
where T: Any + Send + Sync,

fn into_any_arc(self: Arc<T>) -> Arc<dyn Any + Send + Sync>

Convert Arc<Trait> (where Trait: Downcast) to Arc<Any>. Arc<Any> can then be further downcast into Arc<ConcreteType> where ConcreteType implements Trait.
impl<T> From<T> for T

fn from(t: T) -> T

Returns the argument unchanged.

impl<T, U> Into<U> for T
where U: From<T>,

fn into(self) -> U

Calls U::from(self).

That is, this conversion is whatever the implementation of From<T> for U chooses to do.

impl<T> IntoEither for T

fn into_either(self, into_left: bool) -> Either<Self, Self>

Converts self into a Left variant of Either<Self, Self> if into_left is true. Converts self into a Right variant of Either<Self, Self> otherwise.

fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
where F: FnOnce(&Self) -> bool,

Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true. Converts self into a Right variant of Either<Self, Self> otherwise.
impl<T> Pointable for T

const ALIGN: usize = _

The alignment of pointer.

type Init = T

The type for initializers.

unsafe fn init(init: <T as Pointable>::Init) -> usize

Initializes a with the given initializer.

unsafe fn deref<'a>(ptr: usize) -> &'a T

Dereferences the given pointer.

unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut T

Mutably dereferences the given pointer.

unsafe fn drop(ptr: usize)

Drops the object pointed to by the given pointer.
impl<T, U> TryFrom<U> for T
where U: Into<T>,

type Error = Infallible

The type returned in the event of a conversion error.

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.