use std::sync::Arc;
use crate::policy::marksweepspace::native_ms::*;
use crate::util::alloc::allocator;
use crate::util::alloc::Allocator;
use crate::util::linear_scan::Region;
use crate::util::Address;
use crate::util::VMThread;
use crate::vm::VMBinding;
use super::allocator::AllocatorContext;
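/// A free-list allocator for [`MarkSweepSpace`], modelled on mimalloc: blocks are
/// segregated into size-class bins, and each block carries an intrusive free list
/// of equally-sized cells.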
#[repr(C)]
pub struct FreeListAllocator<VM: VMBinding> {
    /// The [`VMThread`] associated with this allocator instance.
    pub tls: VMThread,
    /// The space this allocator allocates into.
    space: &'static MarkSweepSpace<VM>,
    context: Arc<AllocatorContext<VM>>,
    /// Blocks that still have free cells, grouped into size-class bins.
    pub available_blocks: BlockLists,
    /// Available blocks set aside for precise stress testing. Parking them here keeps
    /// `available_blocks` empty, so the fast path fails and every allocation is forced
    /// through the slow path, where a stress GC can be triggered.
    pub available_blocks_stress: BlockLists,
    /// Blocks that have not yet been swept since the last GC.
    pub unswept_blocks: BlockLists,
    /// Blocks with no free cells left.
    pub consumed_blocks: BlockLists,
}
impl<VM: VMBinding> Allocator<VM> for FreeListAllocator<VM> {
fn get_tls(&self) -> VMThread {
self.tls
}
fn get_space(&self) -> &'static dyn crate::policy::space::Space<VM> {
self.space
}
fn get_context(&self) -> &AllocatorContext<VM> {
&self.context
}
fn alloc(&mut self, size: usize, align: usize, offset: usize) -> Address {
debug_assert!(
size <= MAX_BIN_SIZE,
"Alloc request for {} bytes is too big.",
size
);
debug_assert!(align <= VM::MAX_ALIGNMENT);
debug_assert!(align >= VM::MIN_ALIGNMENT);
if let Some(block) = self.find_free_block_local(size, align) {
let cell = self.block_alloc(block);
if !cell.is_zero() {
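                // Under precise stress testing, every allocation must take the slow
                // path, so the fast path should never produce a cell.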
debug_assert!(
!(*self.context.options.precise_stress
&& self.context.options.is_stress_test_gc_enabled())
);
let res = allocator::align_allocation::<VM>(cell, align, offset);
#[cfg(debug_assertions)]
{
let cell_size = block.load_block_cell_size();
debug_assert!(
res + size <= cell + cell_size,
"Allocating (size = {}, align = {}, offset = {}) to the cell {} of size {}, but the end of the allocation region {} is beyond the cell end {}",
size, align, offset, cell, cell_size, res + size, cell + cell_size
);
}
return res;
}
}
self.alloc_slow(size, align, offset)
}
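    /// Slow-path allocation: acquire a block from the global space and allocate from
    /// it, returning [`Address::ZERO`] if no block is available.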
fn alloc_slow_once(&mut self, size: usize, align: usize, offset: usize) -> Address {
if let Some(block) = self.acquire_global_block(size, align, false) {
let addr = self.block_alloc(block);
allocator::align_allocation::<VM>(addr, align, offset)
} else {
Address::ZERO
}
}
fn does_thread_local_allocation(&self) -> bool {
true
}
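    /// Thread-local allocation happens at the granularity of blocks.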
fn get_thread_local_buffer_granularity(&self) -> usize {
Block::BYTES
}
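    /// Slow path for precise stress testing. When `need_poll` is true, the space is
    /// polled for GC first; the allocation then draws from the stress block lists.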
fn alloc_slow_once_precise_stress(
&mut self,
size: usize,
align: usize,
offset: usize,
need_poll: bool,
) -> Address {
trace!("allow slow precise stress s={}", size);
if need_poll {
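            // Go to the space so it can poll for GC.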
self.acquire_global_block(0, 0, true);
}
if let Some(block) = self.find_free_block_stress(size, align) {
let cell = self.block_alloc(block);
allocator::align_allocation::<VM>(cell, align, offset)
} else {
Address::ZERO
}
}
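    /// On mutator destruction, return all local blocks to the space's global
    /// abandoned lists so they can be reused by other allocators.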
fn on_mutator_destroy(&mut self) {
let mut global = self.space.get_abandoned_block_lists().lock().unwrap();
self.abandon_blocks(&mut global);
}
}
impl<VM: VMBinding> FreeListAllocator<VM> {
pub(crate) fn new(
tls: VMThread,
space: &'static MarkSweepSpace<VM>,
context: Arc<AllocatorContext<VM>>,
) -> Self {
FreeListAllocator {
tls,
space,
context,
available_blocks: new_empty_block_lists(),
available_blocks_stress: new_empty_block_lists(),
unswept_blocks: new_empty_block_lists(),
consumed_blocks: new_empty_block_lists(),
}
}
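    /// Pop a cell from the block's free list, zeroing it before returning. Returns
    /// [`Address::ZERO`] if the block has no free cells.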
fn block_alloc(&mut self, block: Block) -> Address {
let cell = block.load_free_list();
        if cell.is_zero() {
            return cell;
        }
let next_cell = unsafe { cell.load::<Address>() };
unsafe { cell.store::<Address>(Address::ZERO) };
debug_assert!(
next_cell.is_zero() || block.includes_address(next_cell),
"next_cell {} is not in {:?}",
next_cell,
block
);
block.store_free_list(next_cell);
let cell_size = block.load_block_cell_size();
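        // Zero the entire cell before handing it out.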
crate::util::memory::zero(cell, cell_size);
#[cfg(debug_assertions)]
{
let mut cursor = cell;
while cursor < cell + cell_size {
debug_assert_eq!(unsafe { cursor.load::<usize>() }, 0);
cursor += crate::util::constants::BYTES_IN_ADDRESS;
}
}
cell
}
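    /// Find a block with a free cell for precise stress testing: first the stress
    /// available lists, then recycled local blocks, then the global space.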
fn find_free_block_stress(&mut self, size: usize, align: usize) -> Option<Block> {
Self::find_free_block_with(
&mut self.available_blocks_stress,
&mut self.consumed_blocks,
size,
align,
)
.or_else(|| self.recycle_local_blocks(size, align, true))
.or_else(|| self.acquire_global_block(size, align, true))
}
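    /// Find a thread-local block with a free cell, lazily sweeping unswept local
    /// blocks if needed. This does not touch the global space.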
fn find_free_block_local(&mut self, size: usize, align: usize) -> Option<Block> {
Self::find_free_block_with(
&mut self.available_blocks,
&mut self.consumed_blocks,
size,
align,
)
.or_else(|| self.recycle_local_blocks(size, align, false))
}
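    /// Scan the available list of the bin for `size` and `align`, moving blocks with
    /// no free cells to the consumed list until a block with a free cell is found.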
fn find_free_block_with(
available_blocks: &mut BlockLists,
consumed_blocks: &mut BlockLists,
size: usize,
align: usize,
) -> Option<Block> {
let bin = mi_bin::<VM>(size, align);
debug_assert!(bin <= MAX_BIN);
let available = &mut available_blocks[bin];
debug_assert!(available.size >= size);
if !available.is_empty() {
let mut cursor = available.first;
while let Some(block) = cursor {
if block.has_free_cells() {
return Some(block);
}
available.pop();
consumed_blocks.get_mut(bin).unwrap().push(block);
cursor = available.first;
}
}
debug_assert!(available_blocks[bin].is_empty());
None
}
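    /// Add a block to the available lists; under precise stress testing it goes to
    /// the stress lists instead.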
fn add_to_available_blocks(&mut self, bin: usize, block: Block, stress: bool) {
if stress {
debug_assert!(*self.context.options.precise_stress);
self.available_blocks_stress[bin].push(block);
} else {
self.available_blocks[bin].push(block);
}
}
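    /// Sweep unswept local blocks in the matching bin until one with free cells is
    /// found. With eager sweeping, all blocks are swept during GC, so there is
    /// nothing to recycle.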
fn recycle_local_blocks(
&mut self,
size: usize,
align: usize,
_stress_test: bool,
) -> Option<Block> {
if cfg!(feature = "eager_sweeping") {
None
} else {
loop {
let bin = mi_bin::<VM>(size, align);
                debug_assert!(self.available_blocks[bin].is_empty());
                if let Some(block) = self.unswept_blocks.get_mut(bin).unwrap().pop() {
block.sweep::<VM>();
if block.has_free_cells() {
self.add_to_available_blocks(
bin,
block,
self.context.options.is_stress_test_gc_enabled(),
);
return Some(block);
} else {
self.consumed_blocks.get_mut(bin).unwrap().push(block);
}
} else {
return None;
}
}
}
}
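    /// Acquire a block from the global space, looping until we get a block with free
    /// cells or the space is exhausted. Fresh blocks are initialized; abandoned
    /// blocks are re-owned and, if unswept, swept before use.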
fn acquire_global_block(
&mut self,
size: usize,
align: usize,
stress_test: bool,
) -> Option<Block> {
let bin = mi_bin::<VM>(size, align);
loop {
match self.space.acquire_block(self.tls, size, align) {
                BlockAcquireResult::Exhausted => {
debug!("Acquire global block: None");
return None;
}
                BlockAcquireResult::Fresh(block) => {
debug!("Acquire global block: Fresh {:?}", block);
self.add_to_available_blocks(bin, block, stress_test);
self.init_block(block, self.available_blocks[bin].size);
return Some(block);
}
                BlockAcquireResult::AbandonedAvailable(block) => {
debug!("Acquire global block: AbandonedAvailable {:?}", block);
block.store_tls(self.tls);
if block.has_free_cells() {
self.add_to_available_blocks(bin, block, stress_test);
return Some(block);
} else {
self.consumed_blocks[bin].push(block);
}
}
                BlockAcquireResult::AbandonedUnswept(block) => {
                    debug!("Acquire global block: AbandonedUnswept {:?}", block);
block.store_tls(self.tls);
block.sweep::<VM>();
if block.has_free_cells() {
self.add_to_available_blocks(bin, block, stress_test);
return Some(block);
} else {
self.consumed_blocks[bin].push(block);
}
}
}
}
}
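    /// Initialize a fresh block: thread an intrusive free list through its cells,
    /// each cell storing the address of the previous one, and record the cell size.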
fn init_block(&self, block: Block, cell_size: usize) {
debug_assert_ne!(cell_size, 0);
self.space.record_new_block(block);
let block_end = block.start() + Block::BYTES;
let mut old_cell = unsafe { Address::zero() };
let mut new_cell = block.start();
let final_cell = loop {
unsafe {
new_cell.store::<Address>(old_cell);
}
old_cell = new_cell;
new_cell += cell_size;
if new_cell + cell_size > block_end {
break old_cell;
            }
};
block.store_free_list(final_cell);
block.store_block_cell_size(cell_size);
#[cfg(feature = "malloc_native_mimalloc")]
{
block.store_local_free_list(Address::ZERO);
block.store_thread_free_list(Address::ZERO);
}
self.store_block_tls(block);
}
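    /// Free a cell by pushing it onto its block's local free list and clearing its
    /// VO bit. Only same-thread frees are supported; a TLS mismatch is unreachable.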
#[cfg(feature = "malloc_native_mimalloc")]
fn free(&self, addr: Address) {
assert!(!addr.is_zero(), "Attempted to free zero address.");
use crate::util::ObjectReference;
let block = Block::from_unaligned_address(addr);
let block_tls = block.load_tls();
if self.tls == block_tls {
let local_free = block.load_local_free_list();
unsafe {
addr.store(local_free);
}
block.store_local_free_list(addr);
} else {
            unreachable!(
                "TLS mismatch when freeing from block {}: my tls = {:?}, block tls = {:?}",
                block.start(),
                self.tls,
                block_tls
            );
}
crate::util::metadata::vo_bit::unset_vo_bit::<VM>(unsafe {
ObjectReference::from_raw_address_unchecked(addr)
})
}
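    /// Mark the block as owned by this allocator's thread.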
fn store_block_tls(&self, block: Block) {
block.store_tls(self.tls);
}
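    /// Nothing needs to be done when a GC is prepared.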
pub(crate) fn prepare(&mut self) {}
pub(crate) fn release(&mut self) {
self.reset();
}
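    /// Whether to return local blocks to the global abandoned lists in `reset`.
    /// Abandoning them allows other allocators to reuse the blocks after a GC.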
const ABANDON_BLOCKS_IN_RESET: bool = true;
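    /// Reset for lazy sweeping: move every local block list to `unswept_blocks`, to
    /// be swept lazily when the allocator picks the blocks up again.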
#[cfg(not(feature = "eager_sweeping"))]
fn reset(&mut self) {
trace!("reset");
for bin in 0..MI_BIN_FULL {
let unswept = self.unswept_blocks.get_mut(bin).unwrap();
debug_assert!(!Self::ABANDON_BLOCKS_IN_RESET || unswept.is_empty());
let mut sweep_later = |list: &mut BlockList| {
list.release_blocks(self.space);
unswept.append(list);
};
sweep_later(&mut self.available_blocks[bin]);
sweep_later(&mut self.available_blocks_stress[bin]);
sweep_later(&mut self.consumed_blocks[bin]);
}
if Self::ABANDON_BLOCKS_IN_RESET {
let mut global = self.space.get_abandoned_block_lists_in_gc().lock().unwrap();
self.abandon_blocks(&mut global);
}
}
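    /// Reset for eager sweeping: sweep all local blocks now, then make consumed
    /// blocks available again (on the stress lists when precise stress is enabled).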
#[cfg(feature = "eager_sweeping")]
fn reset(&mut self) {
debug!("reset");
for bin in 0..MI_BIN_FULL {
self.available_blocks[bin].release_and_sweep_blocks(self.space);
self.available_blocks_stress[bin].release_and_sweep_blocks(self.space);
self.consumed_blocks[bin].release_and_sweep_blocks(self.space);
if *self.context.options.precise_stress
&& self.context.options.is_stress_test_gc_enabled()
{
debug_assert!(*self.context.options.precise_stress);
self.available_blocks_stress[bin].append(&mut self.consumed_blocks[bin]);
} else {
self.available_blocks[bin].append(&mut self.consumed_blocks[bin]);
}
assert!(self.unswept_blocks[bin].is_empty());
}
if Self::ABANDON_BLOCKS_IN_RESET {
let mut global = self.space.get_abandoned_block_lists_in_gc().lock().unwrap();
self.abandon_blocks(&mut global);
}
}
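    /// Append every non-empty local block list to the corresponding global abandoned
    /// list.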
fn abandon_blocks(&mut self, global: &mut AbandonedBlockLists) {
for i in 0..MI_BIN_FULL {
let available = self.available_blocks.get_mut(i).unwrap();
if !available.is_empty() {
global.available[i].append(available);
}
let available_stress = self.available_blocks_stress.get_mut(i).unwrap();
if !available_stress.is_empty() {
global.available[i].append(available_stress);
}
let consumed = self.consumed_blocks.get_mut(i).unwrap();
if !consumed.is_empty() {
global.consumed[i].append(consumed);
}
let unswept = self.unswept_blocks.get_mut(i).unwrap();
if !unswept.is_empty() {
global.unswept[i].append(unswept);
}
}
}
}