use atomic::Atomic;
use super::metadata::*;
use crate::plan::ObjectQueue;
use crate::plan::VectorObjectQueue;
use crate::policy::sft::GCWorkerMutRef;
use crate::policy::sft::SFT;
use crate::policy::space::CommonSpace;
use crate::scheduler::GCWorkScheduler;
use crate::util::heap::gc_trigger::GCTrigger;
use crate::util::heap::PageResource;
use crate::util::malloc::library::{BYTES_IN_MALLOC_PAGE, LOG_BYTES_IN_MALLOC_PAGE};
use crate::util::malloc::malloc_ms_util::*;
use crate::util::metadata::side_metadata::{
SideMetadataContext, SideMetadataSanity, SideMetadataSpec,
};
use crate::util::metadata::MetadataSpec;
use crate::util::object_enum::ObjectEnumerator;
use crate::util::opaque_pointer::*;
use crate::util::Address;
use crate::util::ObjectReference;
use crate::util::{conversions, metadata};
use crate::vm::VMBinding;
use crate::vm::{ActivePlan, Collection, ObjectModel};
use crate::{policy::space::Space, util::heap::layout::vm_layout::BYTES_IN_CHUNK};
#[cfg(debug_assertions)]
use std::collections::HashMap;
use std::marker::PhantomData;
#[cfg(debug_assertions)]
use std::sync::atomic::AtomicU32;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
#[cfg(debug_assertions)]
use std::sync::Mutex;
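// If true, we keep a debug-only map of every address returned by malloc and its usable size,
// and use it to cross-check allocations and frees in debug assertions.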
#[cfg(debug_assertions)]
const ASSERT_ALLOCATION: bool = false;
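/// A space that acquires its memory directly from malloc and reclaims it with mark-sweep
/// collection. Liveness is tracked with mark bits, and side metadata records which chunks
/// and pages contain objects allocated by this space.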
pub struct MallocSpace<VM: VMBinding> {
phantom: PhantomData<VM>,
/// Bytes currently allocated from malloc and not yet freed.
active_bytes: AtomicUsize,
/// Number of malloc pages whose active-page mark is currently set.
active_pages: AtomicUsize,
/// Lowest and highest chunk-aligned addresses observed so far; they bound the chunk range
/// that `release()` sweeps.
pub chunk_addr_min: Atomic<Address>,
pub chunk_addr_max: Atomic<Address>,
metadata: SideMetadataContext,
scheduler: Arc<GCWorkScheduler<VM>>,
gc_trigger: Arc<GCTrigger<VM>>,
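// Debug-only map from each address returned by malloc to its usable size. The size is set
// to zero when the memory is freed, so use-after-free can be detected.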
#[cfg(debug_assertions)]
active_mem: Mutex<HashMap<Address, usize>>,
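// Debug-only counters for the parallel sweep: when the last sweep work packet completes,
// the live bytes accumulated by the packets are checked against `active_bytes`.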
#[cfg(debug_assertions)]
pub total_work_packets: AtomicU32,
#[cfg(debug_assertions)]
pub completed_work_packets: AtomicU32,
#[cfg(debug_assertions)]
pub work_live_bytes: AtomicUsize,
}
impl<VM: VMBinding> SFT for MallocSpace<VM> {
fn name(&self) -> &str {
self.get_name()
}
fn is_live(&self, object: ObjectReference) -> bool {
is_marked::<VM>(object, Ordering::SeqCst)
}
#[cfg(feature = "object_pinning")]
fn pin_object(&self, _object: ObjectReference) -> bool {
false
}
#[cfg(feature = "object_pinning")]
fn unpin_object(&self, _object: ObjectReference) -> bool {
false
}
#[cfg(feature = "object_pinning")]
fn is_object_pinned(&self, _object: ObjectReference) -> bool {
false
}
fn is_movable(&self) -> bool {
false
}
#[cfg(feature = "sanity")]
fn is_sane(&self) -> bool {
true
}
fn is_in_space(&self, object: ObjectReference) -> bool {
is_alloced_by_malloc(object)
}
#[cfg(feature = "is_mmtk_object")]
fn is_mmtk_object(&self, addr: Address) -> Option<ObjectReference> {
debug_assert!(!addr.is_zero());
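// Memory in this space is mapped by the malloc library, not by MMTk's mmapper, so the
// address should not appear as mapped to MMTk.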
debug_assert!(!addr.is_mapped());
has_object_alloced_by_malloc(addr)
}
#[cfg(feature = "is_mmtk_object")]
fn find_object_from_internal_pointer(
&self,
ptr: Address,
max_search_bytes: usize,
) -> Option<ObjectReference> {
crate::util::metadata::vo_bit::find_object_from_internal_pointer::<VM>(
ptr,
max_search_bytes,
)
}
fn initialize_object_metadata(&self, object: ObjectReference, _alloc: bool) {
trace!("initialize_object_metadata for object {}", object);
set_vo_bit(object);
}
fn sft_trace_object(
&self,
queue: &mut VectorObjectQueue,
object: ObjectReference,
_worker: GCWorkerMutRef,
) -> ObjectReference {
self.trace_object(queue, object)
}
}
impl<VM: VMBinding> Space<VM> for MallocSpace<VM> {
fn as_space(&self) -> &dyn Space<VM> {
self
}
fn as_sft(&self) -> &(dyn SFT + Sync + 'static) {
self
}
fn get_page_resource(&self) -> &dyn PageResource<VM> {
unreachable!()
}
fn maybe_get_page_resource_mut(&mut self) -> Option<&mut dyn PageResource<VM>> {
None
}
fn common(&self) -> &CommonSpace<VM> {
unreachable!()
}
fn get_gc_trigger(&self) -> &GCTrigger<VM> {
self.gc_trigger.as_ref()
}
fn initialize_sft(&self, _sft_map: &mut dyn crate::policy::sft_map::SFTMap) {
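// Do nothing here: SFT entries are installed lazily in `alloc()` once we learn which
// addresses malloc hands back to us.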
}
fn release_multiple_pages(&mut self, _start: Address) {
unreachable!()
}
#[allow(clippy::let_and_return)]
fn in_space(&self, object: ObjectReference) -> bool {
let ret = is_alloced_by_malloc(object);
#[cfg(debug_assertions)]
if ASSERT_ALLOCATION {
let addr = object.to_object_start::<VM>();
let active_mem = self.active_mem.lock().unwrap();
if ret {
debug_assert!(
*active_mem.get(&addr).unwrap() != 0,
"active mem check failed for {} (object {}) - was freed",
addr,
object
);
} else {
debug_assert!(
(!active_mem.contains_key(&addr))
|| (active_mem.contains_key(&addr) && *active_mem.get(&addr).unwrap() == 0),
"mem check failed for {} (object {}): allocated = {}, size = {:?}",
addr,
object,
active_mem.contains_key(&addr),
if active_mem.contains_key(&addr) {
active_mem.get(&addr)
} else {
None
}
);
}
}
ret
}
fn address_in_space(&self, _start: Address) -> bool {
unreachable!("We do not know if an address is in malloc space. Use in_space() to check if an object is in malloc space.")
}
fn get_name(&self) -> &'static str {
"MallocSpace"
}
#[allow(clippy::assertions_on_constants)]
fn reserved_pages(&self) -> usize {
use crate::util::constants::LOG_BYTES_IN_PAGE;
debug_assert!(LOG_BYTES_IN_MALLOC_PAGE >= LOG_BYTES_IN_PAGE);
let data_pages = self.active_pages.load(Ordering::SeqCst)
<< (LOG_BYTES_IN_MALLOC_PAGE - LOG_BYTES_IN_PAGE);
let meta_pages = self.metadata.calculate_reserved_pages(data_pages);
data_pages + meta_pages
}
fn verify_side_metadata_sanity(&self, side_metadata_sanity_checker: &mut SideMetadataSanity) {
side_metadata_sanity_checker
.verify_metadata_context(std::any::type_name::<Self>(), &self.metadata)
}
fn enumerate_objects(&self, _enumerator: &mut dyn ObjectEnumerator) {
unimplemented!()
}
}
use crate::scheduler::GCWorker;
use crate::util::copy::CopySemantics;
impl<VM: VMBinding> crate::policy::gc_work::PolicyTraceObject<VM> for MallocSpace<VM> {
fn trace_object<Q: ObjectQueue, const KIND: crate::policy::gc_work::TraceKind>(
&self,
queue: &mut Q,
object: ObjectReference,
_copy: Option<CopySemantics>,
_worker: &mut GCWorker<VM>,
) -> ObjectReference {
self.trace_object(queue, object)
}
fn may_move_objects<const KIND: crate::policy::gc_work::TraceKind>() -> bool {
false
}
}
#[allow(dead_code)]
pub const MAX_OBJECT_SIZE: usize = crate::util::constants::MAX_INT;
impl<VM: VMBinding> MallocSpace<VM> {
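/// MallocSpace needs the VO-bit and active-chunk side metadata to be global. If the
/// `vo_bit` feature is enabled, the VO-bit spec is already in the global specs;
/// otherwise we add it here, along with the active-chunk spec.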
pub fn extend_global_side_metadata_specs(specs: &mut Vec<SideMetadataSpec>) {
if !cfg!(feature = "vo_bit") {
specs.push(crate::util::metadata::vo_bit::VO_BIT_SIDE_METADATA_SPEC);
}
specs.push(ACTIVE_CHUNK_METADATA_SPEC);
}
pub fn new(args: crate::policy::space::PlanCreateSpaceArgs<VM>) -> Self {
MallocSpace {
phantom: PhantomData,
active_bytes: AtomicUsize::new(0),
active_pages: AtomicUsize::new(0),
chunk_addr_min: Atomic::new(Address::MAX),
chunk_addr_max: Atomic::new(Address::ZERO),
metadata: SideMetadataContext {
global: args.global_side_metadata_specs.clone(),
local: metadata::extract_side_metadata(&[
MetadataSpec::OnSide(ACTIVE_PAGE_METADATA_SPEC),
MetadataSpec::OnSide(OFFSET_MALLOC_METADATA_SPEC),
*VM::VMObjectModel::LOCAL_MARK_BIT_SPEC,
]),
},
scheduler: args.scheduler.clone(),
gc_trigger: args.gc_trigger,
#[cfg(debug_assertions)]
active_mem: Mutex::new(HashMap::new()),
#[cfg(debug_assertions)]
total_work_packets: AtomicU32::new(0),
#[cfg(debug_assertions)]
completed_work_packets: AtomicU32::new(0),
#[cfg(debug_assertions)]
work_live_bytes: AtomicUsize::new(0),
}
}
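/// Set the page mark for every malloc page overlapping `[start, start + size)`, and
/// increase the active page count by the number of pages this call newly marked.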
fn set_page_mark(&self, start: Address, size: usize) {
let mut page = start.align_down(BYTES_IN_MALLOC_PAGE);
let mut used_pages = 0;
while page < start + size {
if compare_exchange_set_page_mark(page) {
used_pages += 1;
}
page += BYTES_IN_MALLOC_PAGE;
}
if used_pages != 0 {
self.active_pages.fetch_add(used_pages, Ordering::SeqCst);
}
}
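/// Unset the page mark for every malloc page in `[start, start + size)` and decrease the
/// active page count accordingly.
///
/// # Safety
/// Uses non-atomic metadata accesses, so the caller must ensure no other thread is
/// accessing the page marks for this range (e.g. a single GC thread sweeping the chunk).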
unsafe fn unset_page_mark(&self, start: Address, size: usize) {
debug_assert!(start.is_aligned_to(BYTES_IN_MALLOC_PAGE));
debug_assert!(crate::util::conversions::raw_is_aligned(
size,
BYTES_IN_MALLOC_PAGE
));
let mut page = start;
let mut cleared_pages = 0;
while page < start + size {
if is_page_marked_unsafe(page) {
cleared_pages += 1;
unset_page_mark_unsafe(page);
}
page += BYTES_IN_MALLOC_PAGE;
}
if cleared_pages != 0 {
self.active_pages.fetch_sub(cleared_pages, Ordering::SeqCst);
}
}
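/// Allocate `size` bytes from malloc. Polls the GC trigger first and returns a zero
/// address if a GC is requested; otherwise maps side metadata for the new memory if
/// needed, marks its pages, and updates the allocated-byte count.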
pub fn alloc(&self, tls: VMThread, size: usize, align: usize, offset: usize) -> Address {
if self.get_gc_trigger().poll(false, Some(self)) {
assert!(VM::VMActivePlan::is_mutator(tls), "Polling in GC worker");
VM::VMCollection::block_for_gc(VMMutatorThread(tls));
return unsafe { Address::zero() };
}
let (address, is_offset_malloc) = alloc::<VM>(size, align, offset);
if !address.is_zero() {
let actual_size = get_malloc_usable_size(address, is_offset_malloc);
if !is_meta_space_mapped(address, actual_size) {
self.map_metadata_and_update_bound(address, actual_size);
// Update the SFT so this address range is recognised as belonging to this space.
assert!(crate::mmtk::SFT_MAP.has_sft_entry(address));
unsafe { crate::mmtk::SFT_MAP.update(self, address, actual_size) };
}
self.set_page_mark(address, actual_size);
self.active_bytes.fetch_add(actual_size, Ordering::SeqCst);
if is_offset_malloc {
set_offset_malloc_bit(address);
}
#[cfg(debug_assertions)]
if ASSERT_ALLOCATION {
debug_assert!(actual_size != 0);
self.active_mem.lock().unwrap().insert(address, actual_size);
}
}
address
}
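/// Free the memory at `addr`, looking up from the metadata whether it was an offset
/// malloc and what its usable size is.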
pub fn free(&self, addr: Address) {
let offset_malloc_bit = is_offset_malloc(addr);
let bytes = get_malloc_usable_size(addr, offset_malloc_bit);
self.free_internal(addr, bytes, offset_malloc_bit);
}
fn free_internal(&self, addr: Address, bytes: usize, offset_malloc_bit: bool) {
if offset_malloc_bit {
trace!("Free memory {:x}", addr);
offset_free(addr);
unsafe { unset_offset_malloc_bit_unsafe(addr) };
} else {
let ptr = addr.to_mut_ptr();
trace!("Free memory {:?}", ptr);
unsafe {
free(ptr);
}
}
self.active_bytes.fetch_sub(bytes, Ordering::SeqCst);
#[cfg(debug_assertions)]
if ASSERT_ALLOCATION {
self.active_mem.lock().unwrap().insert(addr, 0).unwrap();
}
}
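/// Trace an object: mark it and its chunk if it is not already marked, and enqueue it
/// for scanning. Objects are never moved by this space.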
pub fn trace_object<Q: ObjectQueue>(
&self,
queue: &mut Q,
object: ObjectReference,
) -> ObjectReference {
assert!(
self.in_space(object),
"Cannot mark an object {} that was not alloced by malloc.",
object,
);
if !is_marked::<VM>(object, Ordering::Relaxed) {
let chunk_start = conversions::chunk_align_down(object.to_object_start::<VM>());
set_mark_bit::<VM>(object, Ordering::SeqCst);
set_chunk_mark(chunk_start);
queue.enqueue(object);
}
object
}
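/// Map the side metadata for the range `[addr, addr + size)` and widen the recorded
/// `chunk_addr_min`/`chunk_addr_max` bounds to cover it.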
fn map_metadata_and_update_bound(&self, addr: Address, size: usize) {
map_meta_space(&self.metadata, addr, size);
{
let min_chunk_start = conversions::chunk_align_down(addr);
let mut min = self.chunk_addr_min.load(Ordering::Relaxed);
while min_chunk_start < min {
match self.chunk_addr_min.compare_exchange_weak(
min,
min_chunk_start,
Ordering::AcqRel,
Ordering::Relaxed,
) {
Ok(_) => break,
Err(x) => min = x,
}
}
}
{
let max_chunk_start = conversions::chunk_align_down(addr + size);
let mut max = self.chunk_addr_max.load(Ordering::Relaxed);
while max_chunk_start > max {
match self.chunk_addr_max.compare_exchange_weak(
max,
max_chunk_start,
Ordering::AcqRel,
Ordering::Relaxed,
) {
Ok(_) => break,
Err(x) => max = x,
}
}
}
}
pub fn prepare(&mut self) {}
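/// Generate one sweep work packet per marked chunk between the recorded chunk bounds
/// and add them to the Release bucket.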
pub fn release(&mut self) {
use crate::scheduler::WorkBucketStage;
let mut work_packets: Vec<Box<dyn GCWork<VM>>> = vec![];
let mut chunk = self.chunk_addr_min.load(Ordering::Relaxed);
let end = self.chunk_addr_max.load(Ordering::Relaxed) + BYTES_IN_CHUNK;
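// Erase the lifetime: the sweep packets store a `&'static MallocSpace`, and the space
// outlives the collection during which those packets run.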
let space = unsafe { &*(self as *const Self) };
while chunk < end {
if is_chunk_mapped(chunk) && unsafe { is_chunk_marked_unsafe(chunk) } {
work_packets.push(Box::new(MSSweepChunk { ms: space, chunk }));
}
chunk += BYTES_IN_CHUNK;
}
debug!("Generated {} sweep work packets", work_packets.len());
#[cfg(debug_assertions)]
{
self.total_work_packets
.store(work_packets.len() as u32, Ordering::SeqCst);
self.completed_work_packets.store(0, Ordering::SeqCst);
self.work_live_bytes.store(0, Ordering::SeqCst);
}
self.scheduler.work_buckets[WorkBucketStage::Release].bulk_add(work_packets);
}
pub fn end_of_gc(&mut self) {}
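/// Sweep a single chunk, choosing the side-metadata or in-header implementation
/// depending on where the VM keeps its local mark bit.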
pub fn sweep_chunk(&self, chunk_start: Address) {
match *VM::VMObjectModel::LOCAL_MARK_BIT_SPEC {
MetadataSpec::OnSide(local_mark_bit_side_spec) => {
self.sweep_chunk_mark_on_side(chunk_start, local_mark_bit_side_spec);
}
_ => {
self.sweep_chunk_mark_in_header(chunk_start);
}
}
}
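/// Return the malloc'd start address of an object, whether it was allocated with an
/// offset malloc, and its usable size.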
fn get_malloc_addr_size(object: ObjectReference) -> (Address, bool, usize) {
let obj_start = object.to_object_start::<VM>();
let offset_malloc_bit = is_offset_malloc(obj_start);
let bytes = get_malloc_usable_size(obj_start, offset_malloc_bit);
(obj_start, offset_malloc_bit, bytes)
}
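/// Release the bookkeeping for a chunk that holds no live objects: clear its chunk mark,
/// its SFT entry, and all of its page marks.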
fn clean_up_empty_chunk(&self, chunk_start: Address) {
unsafe { unset_chunk_mark_unsafe(chunk_start) };
unsafe { crate::mmtk::SFT_MAP.clear(chunk_start) };
unsafe { self.unset_page_mark(chunk_start, BYTES_IN_CHUNK) };
}
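/// Sweep one object: free it if it is unmarked, otherwise unset the page marks for any
/// empty pages preceding it and advance `empty_page_start` past it. Returns true if the
/// object was freed.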
fn sweep_object(&self, object: ObjectReference, empty_page_start: &mut Address) -> bool {
let (obj_start, offset_malloc, bytes) = Self::get_malloc_addr_size(object);
if !unsafe { is_marked_unsafe::<VM>(object) } {
trace!("Object {} has been allocated but not marked", object);
self.free_internal(obj_start, bytes, offset_malloc);
trace!("free object {}", object);
unsafe { unset_vo_bit_unsafe(object) };
true
} else {
if !empty_page_start.is_zero() {
let current_page = object
.to_object_start::<VM>()
.align_down(BYTES_IN_MALLOC_PAGE);
if current_page > *empty_page_start {
unsafe {
self.unset_page_mark(*empty_page_start, current_page - *empty_page_start)
};
}
}
*empty_page_start = (obj_start + bytes).align_up(BYTES_IN_MALLOC_PAGE);
false
}
}
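/// Debug-only bookkeeping run after each sweep packet: once the last packet completes,
/// assert that the live bytes observed by the sweep equal `active_bytes`.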
#[cfg(debug_assertions)]
fn debug_sweep_chunk_done(&self, live_bytes_in_the_chunk: usize) {
debug!(
"Used bytes after releasing: {}",
self.active_bytes.load(Ordering::SeqCst)
);
let completed_packets = self.completed_work_packets.fetch_add(1, Ordering::SeqCst) + 1;
self.work_live_bytes
.fetch_add(live_bytes_in_the_chunk, Ordering::SeqCst);
if completed_packets == self.total_work_packets.load(Ordering::Relaxed) {
trace!(
"work_live_bytes = {}, live_bytes = {}, active_bytes = {}",
self.work_live_bytes.load(Ordering::Relaxed),
live_bytes_in_the_chunk,
self.active_bytes.load(Ordering::Relaxed)
);
debug_assert_eq!(
self.work_live_bytes.load(Ordering::Relaxed),
self.active_bytes.load(Ordering::Relaxed)
);
}
}
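/// Sweep a chunk when the mark bits are kept in side metadata, which allows the VO bits
/// and mark bits to be compared in bulk.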
fn sweep_chunk_mark_on_side(&self, chunk_start: Address, mark_bit_spec: SideMetadataSpec) {
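// When enabled, dead objects are located by bulk-loading 128 bits of VO-bit and mark-bit
// metadata at a time and XOR-ing them; it is currently disabled, so we fall back to
// sweeping each object individually.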
const BULK_XOR_ON_MARK_BITS: bool = false;
if BULK_XOR_ON_MARK_BITS {
#[cfg(debug_assertions)]
let mut live_bytes = 0;
debug!("Check active chunk {:?}", chunk_start);
let mut address = chunk_start;
let chunk_end = chunk_start + BYTES_IN_CHUNK;
debug_assert!(
crate::util::metadata::vo_bit::VO_BIT_SIDE_METADATA_SPEC.log_bytes_in_region
== mark_bit_spec.log_bytes_in_region,
"VO-bit and mark-bit metadata have different minimum object sizes!"
);
let bulk_load_size: usize = 128
* (1 << crate::util::metadata::vo_bit::VO_BIT_SIDE_METADATA_SPEC
.log_bytes_in_region);
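// Points one page past the last live object seen so far; pages between this address and
// the next live object hold no live data and can have their marks cleared.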
let mut empty_page_start = Address::ZERO;
while address < chunk_end {
let alloc_128: u128 = unsafe {
load128(
&crate::util::metadata::vo_bit::VO_BIT_SIDE_METADATA_SPEC,
address,
)
};
let mark_128: u128 = unsafe { load128(&mark_bit_spec, address) };
if alloc_128 ^ mark_128 != 0 {
let end = address + bulk_load_size;
let bulk_load_scan = crate::util::linear_scan::ObjectIterator::<
VM,
MallocObjectSize<VM>,
false,
>::new(address, end);
for object in bulk_load_scan {
self.sweep_object(object, &mut empty_page_start);
}
} else {
if alloc_128 != 0 {
empty_page_start = address + bulk_load_size;
}
}
address += bulk_load_size;
debug_assert!(address.is_aligned_to(bulk_load_size));
}
#[cfg(debug_assertions)]
{
let chunk_linear_scan = crate::util::linear_scan::ObjectIterator::<
VM,
MallocObjectSize<VM>,
false,
>::new(chunk_start, chunk_end);
for object in chunk_linear_scan {
let (obj_start, _, bytes) = Self::get_malloc_addr_size(object);
if ASSERT_ALLOCATION {
debug_assert!(
self.active_mem.lock().unwrap().contains_key(&obj_start),
"Address {} with VO bit is not in active_mem",
obj_start
);
debug_assert_eq!(
self.active_mem.lock().unwrap().get(&obj_start),
Some(&bytes),
"Address {} size in active_mem does not match the size from malloc_usable_size",
obj_start
);
}
debug_assert!(
unsafe { is_marked_unsafe::<VM>(object) },
"Dead object = {} found after sweep",
object
);
live_bytes += bytes;
}
}
mark_bit_spec.bzero_metadata(chunk_start, BYTES_IN_CHUNK);
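// If we never advanced `empty_page_start`, no live object was found in the chunk.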
if empty_page_start.is_zero() {
self.clean_up_empty_chunk(chunk_start);
}
#[cfg(debug_assertions)]
self.debug_sweep_chunk_done(live_bytes);
} else {
self.sweep_each_object_in_chunk(chunk_start);
}
}
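/// Sweep a chunk when the mark bits live in object headers; every object has to be
/// visited individually.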
fn sweep_chunk_mark_in_header(&self, chunk_start: Address) {
self.sweep_each_object_in_chunk(chunk_start)
}
fn sweep_each_object_in_chunk(&self, chunk_start: Address) {
#[cfg(debug_assertions)]
let mut live_bytes = 0;
debug!("Check active chunk {:?}", chunk_start);
let mut empty_page_start = Address::ZERO;
let chunk_linear_scan = crate::util::linear_scan::ObjectIterator::<
VM,
MallocObjectSize<VM>,
false,
>::new(chunk_start, chunk_start + BYTES_IN_CHUNK);
for object in chunk_linear_scan {
#[cfg(debug_assertions)]
if ASSERT_ALLOCATION {
let (obj_start, _, bytes) = Self::get_malloc_addr_size(object);
debug_assert!(
self.active_mem.lock().unwrap().contains_key(&obj_start),
"Address {} with VO bit is not in active_mem",
obj_start
);
debug_assert_eq!(
self.active_mem.lock().unwrap().get(&obj_start),
Some(&bytes),
"Address {} size in active_mem does not match the size from malloc_usable_size",
obj_start
);
}
let live = !self.sweep_object(object, &mut empty_page_start);
if live {
unsafe { unset_mark_bit::<VM>(object) };
#[cfg(debug_assertions)]
{
let (_, _, bytes) = Self::get_malloc_addr_size(object);
live_bytes += bytes;
}
}
}
if empty_page_start.is_zero() {
self.clean_up_empty_chunk(chunk_start);
} else if empty_page_start < chunk_start + BYTES_IN_CHUNK {
unsafe {
self.unset_page_mark(
empty_page_start,
chunk_start + BYTES_IN_CHUNK - empty_page_start,
)
};
}
#[cfg(debug_assertions)]
self.debug_sweep_chunk_done(live_bytes);
}
}
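/// Provides object sizes (from `malloc_usable_size`) for the linear-scan object iterator.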
struct MallocObjectSize<VM>(PhantomData<VM>);
impl<VM: VMBinding> crate::util::linear_scan::LinearScanObjectSize for MallocObjectSize<VM> {
fn size(object: ObjectReference) -> usize {
let (_, _, bytes) = MallocSpace::<VM>::get_malloc_addr_size(object);
bytes
}
}
use crate::scheduler::GCWork;
use crate::MMTK;
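/// Work packet that sweeps a single chunk of the malloc space.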
pub struct MSSweepChunk<VM: VMBinding> {
ms: &'static MallocSpace<VM>,
chunk: Address,
}
impl<VM: VMBinding> GCWork<VM> for MSSweepChunk<VM> {
fn do_work(&mut self, _worker: &mut GCWorker<VM>, _mmtk: &'static MMTK<VM>) {
self.ms.sweep_chunk(self.chunk);
}
}