use atomic::Ordering;
use super::BlockList;
use super::MarkSweepSpace;
use crate::util::constants::LOG_BYTES_IN_PAGE;
use crate::util::heap::chunk_map::*;
use crate::util::linear_scan::Region;
use crate::util::object_enum::BlockMayHaveObjects;
use crate::vm::ObjectModel;
use crate::{
util::{
metadata::side_metadata::SideMetadataSpec, Address, ObjectReference, OpaquePointer,
VMThread,
},
vm::VMBinding,
};
use std::num::NonZeroUsize;
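/// A block in the native mark-sweep space: a 2^16-byte (64 KiB), block-aligned region that is
/// divided into equal-sized cells of a single size class. The start address is stored as a
/// `NonZeroUsize` so that `Option<Block>` has the same size as `Block`.
///
/// A minimal sketch of obtaining a block handle from an address (`addr` is a hypothetical,
/// already-mapped address inside the mark-sweep space):
///
/// ```ignore
/// // `from_unaligned_address` is provided by the `Region` trait and rounds down to the
/// // block boundary; `from_aligned_address` requires an aligned, non-zero address.
/// let block = Block::from_unaligned_address(addr);
/// assert!(block.start().is_aligned_to(Block::BYTES));
/// ```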
#[derive(Clone, Copy, PartialOrd, PartialEq)]
#[repr(transparent)]
pub struct Block(NonZeroUsize);
impl std::fmt::Debug for Block {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "Block(0x{:x})", self.0)
}
}
impl Region for Block {
/// Each block is 2^16 bytes (64 KiB).
const LOG_BYTES: usize = 16;
fn from_aligned_address(address: Address) -> Self {
debug_assert!(address.is_aligned_to(Self::BYTES));
debug_assert!(!address.is_zero());
Self(unsafe { NonZeroUsize::new_unchecked(address.as_usize()) })
}
fn start(&self) -> Address {
unsafe { Address::from_usize(self.0.get()) }
}
}
impl BlockMayHaveObjects for Block {
fn may_have_objects(&self) -> bool {
self.get_state() != BlockState::Unallocated
}
}
impl Block {
/// Log of the number of pages in a block.
pub const LOG_PAGES: usize = Self::LOG_BYTES - LOG_BYTES_IN_PAGE as usize;
/// The block-granularity side metadata specs. The two mimalloc-specific free-list tables
/// below are feature-gated and are not part of this list.
pub const METADATA_SPECS: [SideMetadataSpec; 7] = [
Self::MARK_TABLE,
Self::NEXT_BLOCK_TABLE,
Self::PREV_BLOCK_TABLE,
Self::FREE_LIST_TABLE,
Self::SIZE_TABLE,
Self::BLOCK_LIST_TABLE,
Self::TLS_TABLE,
];
/// Per-block mark byte, recording the block's [`BlockState`].
pub const MARK_TABLE: SideMetadataSpec =
crate::util::metadata::side_metadata::spec_defs::MS_BLOCK_MARK;
/// Address of the next block in the same [`BlockList`] (zero if this is the last block).
pub const NEXT_BLOCK_TABLE: SideMetadataSpec =
crate::util::metadata::side_metadata::spec_defs::MS_BLOCK_NEXT;
/// Address of the previous block in the same [`BlockList`] (zero if this is the first block).
pub const PREV_BLOCK_TABLE: SideMetadataSpec =
crate::util::metadata::side_metadata::spec_defs::MS_BLOCK_PREV;
/// Head of the block's free cell list (zero if the block has no free cells).
pub const FREE_LIST_TABLE: SideMetadataSpec =
crate::util::metadata::side_metadata::spec_defs::MS_FREE;
/// Head of the mimalloc-style local free list.
#[cfg(feature = "malloc_native_mimalloc")]
pub const LOCAL_FREE_LIST_TABLE: SideMetadataSpec =
crate::util::metadata::side_metadata::spec_defs::MS_LOCAL_FREE;
/// Head of the mimalloc-style thread free list (updated atomically; see
/// `cas_thread_free_list`).
#[cfg(feature = "malloc_native_mimalloc")]
pub const THREAD_FREE_LIST_TABLE: SideMetadataSpec =
crate::util::metadata::side_metadata::spec_defs::MS_THREAD_FREE;
/// Size in bytes of the cells in this block.
pub const SIZE_TABLE: SideMetadataSpec =
crate::util::metadata::side_metadata::spec_defs::MS_BLOCK_SIZE;
/// Raw pointer to the [`BlockList`] this block belongs to.
pub const BLOCK_LIST_TABLE: SideMetadataSpec =
crate::util::metadata::side_metadata::spec_defs::MS_BLOCK_LIST;
/// The mutator thread ([`VMThread`]) associated with this block.
pub const TLS_TABLE: SideMetadataSpec =
crate::util::metadata::side_metadata::spec_defs::MS_BLOCK_TLS;
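/// Load the head of the block's free cell list. Free cells are linked through their first
/// word (see [`Block::sweep`]); a zero address means the block has no free cells.
///
/// A hedged sketch of how an allocator could pop one cell off this list (this is not the
/// allocator code in this crate; `block` is a hypothetical block with free cells):
///
/// ```ignore
/// let cell = block.load_free_list();
/// if !cell.is_zero() {
///     // The first word of a free cell holds the next free cell (or zero).
///     let next = unsafe { cell.load::<Address>() };
///     block.store_free_list(next);
///     // `cell` is now available as a `block.load_block_cell_size()`-byte allocation.
/// }
/// ```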
pub fn load_free_list(&self) -> Address {
unsafe { Address::from_usize(Block::FREE_LIST_TABLE.load::<usize>(self.start())) }
}
/// Store `free_list` as the new head of the block's free cell list.
pub fn store_free_list(&self, free_list: Address) {
unsafe { Block::FREE_LIST_TABLE.store::<usize>(self.start(), free_list.as_usize()) }
}
/// Load the head of the block's mimalloc-style local free list.
#[cfg(feature = "malloc_native_mimalloc")]
pub fn load_local_free_list(&self) -> Address {
unsafe { Address::from_usize(Block::LOCAL_FREE_LIST_TABLE.load::<usize>(self.start())) }
}
/// Store the head of the block's mimalloc-style local free list.
#[cfg(feature = "malloc_native_mimalloc")]
pub fn store_local_free_list(&self, local_free: Address) {
unsafe { Block::LOCAL_FREE_LIST_TABLE.store::<usize>(self.start(), local_free.as_usize()) }
}
/// Atomically load the head of the block's mimalloc-style thread free list.
#[cfg(feature = "malloc_native_mimalloc")]
pub fn load_thread_free_list(&self) -> Address {
unsafe {
Address::from_usize(
Block::THREAD_FREE_LIST_TABLE.load_atomic::<usize>(self.start(), Ordering::SeqCst),
)
}
}
/// Store the head of the block's mimalloc-style thread free list.
#[cfg(feature = "malloc_native_mimalloc")]
pub fn store_thread_free_list(&self, thread_free: Address) {
unsafe {
Block::THREAD_FREE_LIST_TABLE.store::<usize>(self.start(), thread_free.as_usize())
}
}
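/// Atomically compare-and-swap the head of the block's thread free list, returning `true`
/// on success. Because other threads may push freed cells onto this list concurrently,
/// updates must go through this primitive rather than a plain store.
///
/// A hedged sketch of a push loop built on this method (`cell` is a hypothetical cell being
/// freed; linking through the cell's first word mirrors how the other free lists are threaded):
///
/// ```ignore
/// loop {
///     let old_head = block.load_thread_free_list();
///     // Make the cell point at the current head before publishing it.
///     unsafe { cell.store::<Address>(old_head) };
///     if block.cas_thread_free_list(old_head, cell) {
///         break;
///     }
///     // Another thread changed the head in the meantime; retry.
/// }
/// ```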
#[cfg(feature = "malloc_native_mimalloc")]
pub fn cas_thread_free_list(&self, old_thread_free: Address, new_thread_free: Address) -> bool {
Block::THREAD_FREE_LIST_TABLE
.compare_exchange_atomic::<usize>(
self.start(),
old_thread_free.as_usize(),
new_thread_free.as_usize(),
Ordering::SeqCst,
Ordering::SeqCst,
)
.is_ok()
}
/// Load the previous block in the [`BlockList`] this block belongs to, or `None` if this is
/// the first block (the previous-block metadata is zero).
pub fn load_prev_block(&self) -> Option<Block> {
let prev = unsafe { Block::PREV_BLOCK_TABLE.load::<usize>(self.start()) };
NonZeroUsize::new(prev).map(Block)
}
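/// Load the next block in the [`BlockList`] this block belongs to, or `None` if this is the
/// last block (the next-block metadata is zero).
///
/// A sketch of walking a list of blocks through this field (`head` is a hypothetical block
/// known to be on a list):
///
/// ```ignore
/// let mut cursor = Some(head);
/// while let Some(block) = cursor {
///     // ... inspect `block` ...
///     cursor = block.load_next_block();
/// }
/// ```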
pub fn load_next_block(&self) -> Option<Block> {
let next = unsafe { Block::NEXT_BLOCK_TABLE.load::<usize>(self.start()) };
NonZeroUsize::new(next).map(Block)
}
/// Set `next` as the next block of this block in its [`BlockList`].
pub fn store_next_block(&self, next: Block) {
unsafe {
Block::NEXT_BLOCK_TABLE.store::<usize>(self.start(), next.start().as_usize());
}
}
/// Clear the next-block pointer, making this block the last block in its list.
pub fn clear_next_block(&self) {
unsafe {
Block::NEXT_BLOCK_TABLE.store::<usize>(self.start(), 0);
}
}
/// Set `prev` as the previous block of this block in its [`BlockList`].
pub fn store_prev_block(&self, prev: Block) {
unsafe {
Block::PREV_BLOCK_TABLE.store::<usize>(self.start(), prev.start().as_usize());
}
}
/// Clear the previous-block pointer, making this block the first block in its list.
pub fn clear_prev_block(&self) {
unsafe {
Block::PREV_BLOCK_TABLE.store::<usize>(self.start(), 0);
}
}
/// Record the [`BlockList`] this block belongs to, stored as a raw pointer.
pub fn store_block_list(&self, block_list: &BlockList) {
let block_list_usize: usize = block_list as *const BlockList as usize;
unsafe {
Block::BLOCK_LIST_TABLE.store::<usize>(self.start(), block_list_usize);
}
}
/// Load the [`BlockList`] this block belongs to, as a raw pointer.
pub fn load_block_list(&self) -> *mut BlockList {
let block_list =
Block::BLOCK_LIST_TABLE.load_atomic::<usize>(self.start(), Ordering::SeqCst);
block_list as *mut BlockList
}
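/// Load the cell size (in bytes) used by this block. Every cell in a block has the same
/// size, so the cell count is `Block::BYTES / cell_size`.
///
/// For example (the cell size here is illustrative only):
///
/// ```ignore
/// let cell_size = block.load_block_cell_size(); // e.g. 64 bytes
/// let cells_per_block = Block::BYTES / cell_size; // 65536 / 64 = 1024 cells
/// ```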
pub fn load_block_cell_size(&self) -> usize {
Block::SIZE_TABLE.load_atomic::<usize>(self.start(), Ordering::SeqCst)
}
/// Store the cell size (in bytes) for this block. The size must not be zero.
pub fn store_block_cell_size(&self, size: usize) {
debug_assert_ne!(size, 0);
unsafe { Block::SIZE_TABLE.store::<usize>(self.start(), size) }
}
/// Record the mutator thread ([`VMThread`]) associated with this block.
pub fn store_tls(&self, tls: VMThread) {
let tls_usize: usize = tls.0.to_address().as_usize();
unsafe { Block::TLS_TABLE.store(self.start(), tls_usize) }
}
/// Load the mutator thread ([`VMThread`]) associated with this block.
pub fn load_tls(&self) -> VMThread {
let tls = Block::TLS_TABLE.load_atomic::<usize>(self.start(), Ordering::SeqCst);
VMThread(OpaquePointer::from_address(unsafe {
Address::from_usize(tls)
}))
}
/// Return `true` if the block's free cell list is non-empty.
pub fn has_free_cells(&self) -> bool {
!self.load_free_list().is_zero()
}
/// Get the block's current [`BlockState`] from the per-block mark byte.
pub fn get_state(&self) -> BlockState {
let byte = Self::MARK_TABLE.load_atomic::<u8>(self.start(), Ordering::SeqCst);
byte.into()
}
/// Set the block's [`BlockState`] by writing the per-block mark byte.
pub fn set_state(&self, state: BlockState) {
let state = u8::from(state);
Self::MARK_TABLE.store_atomic::<u8>(self.start(), state, Ordering::SeqCst);
}
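/// Release the block if it is still unmarked after tracing: unlink it from its [`BlockList`]
/// and return its memory to the [`MarkSweepSpace`]. Returns `true` if the block was released.
///
/// A hedged sketch of the intended per-GC pattern (a hypothetical driver loop; the actual
/// sweeping logic lives in the space, not here):
///
/// ```ignore
/// // An unmarked block holds no live objects and can be freed wholesale; a marked block
/// // is kept and swept cell by cell to rebuild its free list.
/// if !block.attempt_release(space) {
///     block.sweep::<VM>();
/// }
/// ```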
pub fn attempt_release<VM: VMBinding>(self, space: &MarkSweepSpace<VM>) -> bool {
match self.get_state() {
// A block that is still unallocated should never be considered for release.
BlockState::Unallocated => unreachable!(),
BlockState::Unmarked => {
// No marked object was found in the block: unlink it from its block list and
// return its memory to the space.
let block_list = self.load_block_list();
// SAFETY: the caller must ensure no other thread is concurrently mutating this list.
unsafe { &mut *block_list }.remove(self);
space.release_block(self);
true
}
BlockState::Marked => {
// The block still holds live objects; keep it so it can be swept cell by cell.
false
}
}
}
/// Sweep the block: examine every cell and rebuild the free list from the cells that contain
/// no marked object.
pub fn sweep<VM: VMBinding>(&self) {
// Sweeping with the mimalloc-style local/thread free lists is not implemented.
if cfg!(feature = "malloc_native_mimalloc") {
unimplemented!()
}
// The fast path is only sound when a cell address is guaranteed to be the object
// reference address of any object in the cell: no allocation offset, a single alignment
// that the cell size preserves, and a unified object reference address in the object model.
if !VM::USE_ALLOCATION_OFFSET
&& VM::MAX_ALIGNMENT == VM::MIN_ALIGNMENT
&& crate::util::conversions::raw_is_aligned(
self.load_block_cell_size(),
VM::MAX_ALIGNMENT,
)
&& VM::VMObjectModel::UNIFIED_OBJECT_REFERENCE_ADDRESS
{
self.simple_sweep::<VM>()
} else {
self.naive_brute_force_sweep::<VM>()
}
}
/// Fast sweep: each cell address is also a valid object reference address, so the mark bit
/// of each cell can be checked directly and unmarked cells are linked into a new free list.
fn simple_sweep<VM: VMBinding>(&self) {
let cell_size = self.load_block_cell_size();
debug_assert_ne!(cell_size, 0);
let mut cell = self.start();
// `last` is the head of the free list built so far; zero terminates the list.
let mut last = Address::ZERO;
while cell + cell_size <= self.end() {
// The invariants checked in `sweep` guarantee that if this cell holds an object, the
// cell address is that object's reference address.
let potential_object = unsafe { ObjectReference::from_raw_address_unchecked(cell) };
if !VM::VMObjectModel::LOCAL_MARK_BIT_SPEC
.is_marked::<VM>(potential_object, Ordering::SeqCst)
{
// The cell is free: clear its VO bit (if enabled) and link it into the free list
// through its first word.
#[cfg(feature = "vo_bit")]
crate::util::metadata::vo_bit::unset_vo_bit_nocheck(potential_object);
unsafe {
cell.store::<Address>(last);
}
last = cell;
}
cell += cell_size;
}
self.store_free_list(last);
}
/// Brute-force sweep: a cell address cannot be assumed to be an object reference address, so
/// every possible allocation address within a cell is probed at `MIN_OBJECT_SIZE` steps, and
/// the cell is treated as free only if none of the probed addresses holds a marked object.
fn naive_brute_force_sweep<VM: VMBinding>(&self) {
use crate::util::constants::MIN_OBJECT_SIZE;
let cell_size = self.load_block_cell_size();
// `cell` is the cell currently being examined; `cursor` probes addresses within it, and
// `last` is the head of the free list built so far (zero terminates the list).
let mut cell = self.start();
let mut last = Address::ZERO;
let mut cursor = cell;
debug!("Sweep block {:?}, cell size {}", self, cell_size);
while cell + cell_size <= self.end() {
let potential_object_ref = unsafe {
ObjectReference::from_raw_address_unchecked(
cursor + VM::VMObjectModel::OBJECT_REF_OFFSET_LOWER_BOUND,
)
};
trace!(
"{:?}: cell = {}, last cell in free list = {}, cursor = {}, potential object = {}",
self,
cell,
last,
cursor,
potential_object_ref
);
if VM::VMObjectModel::LOCAL_MARK_BIT_SPEC
.is_marked::<VM>(potential_object_ref, Ordering::SeqCst)
{
// Found a marked object in this cell: the cell is live. Skip to the next cell.
debug!("{:?} Live cell: {}", self, cell);
cell += cell_size;
cursor = cell;
} else {
// Nothing marked at this address: probe the next possible allocation start in the cell.
cursor += MIN_OBJECT_SIZE;
// Every possible address in the cell has been probed and none was marked: the cell
// is free.
if cursor >= cell + cell_size {
debug!(
"{:?} Free cell: {}, last cell in freelist is {}",
self, cell, last
);
#[cfg(feature = "vo_bit")]
crate::util::metadata::vo_bit::bzero_vo_bit(cell, cell_size);
debug_assert!(last.is_zero() || (last >= self.start() && last < self.end()));
unsafe {
cell.store::<Address>(last);
}
last = cell;
cell += cell_size;
debug_assert_eq!(cursor, cell);
}
}
}
self.store_free_list(last);
}
/// Get the chunk containing this block.
pub fn chunk(&self) -> Chunk {
Chunk::from_unaligned_address(self.start())
}
/// Initialise the block: mark it as allocated by setting its state to `Unmarked`.
pub fn init(&self) {
self.set_state(BlockState::Unmarked);
}
/// Reset the block to `Unallocated` when it is no longer used.
pub fn deinit(&self) {
self.set_state(BlockState::Unallocated);
}
}
#[derive(Debug, PartialEq, Clone, Copy)]
/// The state of a block, stored in the per-block mark byte.
pub enum BlockState {
/// The block is not allocated to any size class.
Unallocated,
/// The block is allocated, but no object in it has been marked in the current GC.
Unmarked,
/// At least one object in the block has been marked in the current GC.
Marked,
}
impl BlockState {
/// Mark byte value for an unallocated block.
const MARK_UNALLOCATED: u8 = 0;
/// Mark byte value for an allocated but unmarked block.
const MARK_UNMARKED: u8 = u8::MAX;
/// Mark byte value for a marked block.
const MARK_MARKED: u8 = u8::MAX - 1;
}
impl From<u8> for BlockState {
fn from(state: u8) -> Self {
match state {
Self::MARK_UNALLOCATED => BlockState::Unallocated,
Self::MARK_UNMARKED => BlockState::Unmarked,
Self::MARK_MARKED => BlockState::Marked,
_ => unreachable!(),
}
}
}
impl From<BlockState> for u8 {
fn from(state: BlockState) -> Self {
match state {
BlockState::Unallocated => BlockState::MARK_UNALLOCATED,
BlockState::Unmarked => BlockState::MARK_UNMARKED,
BlockState::Marked => BlockState::MARK_MARKED,
}
}
}