use super::map::CreateFreeListResult;
use super::map::VMMap;
use crate::mmtk::SFT_MAP;
use crate::util::conversions;
use crate::util::freelist::FreeList;
use crate::util::heap::layout::heap_parameters::*;
use crate::util::heap::layout::vm_layout::*;
use crate::util::heap::space_descriptor::SpaceDescriptor;
use crate::util::int_array_freelist::IntArrayFreeList;
use crate::util::Address;
use std::cell::UnsafeCell;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Mutex, MutexGuard};
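/// A `VMMap` implementation for 32-bit (and 32-bit-like) address spaces. It records which
/// `SpaceDescriptor` owns each chunk and manages the discontiguous chunk range through free
/// lists. Mutation goes through an `UnsafeCell`, guarded by the `sync` mutex.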
pub struct Map32 {
sync: Mutex<()>,
inner: UnsafeCell<Map32Inner>,
}
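/// The mutable state of [`Map32`], read via `Deref` and mutated via `Map32::mut_self`.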
#[doc(hidden)]
pub struct Map32Inner {
    /// Chunk index of the previous contiguous region in a space's region list (0 = none).
    prev_link: Vec<i32>,
    /// Chunk index of the next contiguous region in a space's region list (0 = none).
    next_link: Vec<i32>,
    /// Free list over the chunks of the discontiguous address range.
    region_map: IntArrayFreeList,
    /// Free list over the pages of the discontiguous range; parent of the per-space free lists.
    global_page_map: IntArrayFreeList,
    /// Number of child free lists handed out from `global_page_map`.
    shared_discontig_fl_count: usize,
    /// Chunks currently available for discontiguous allocation.
    total_available_discontiguous_chunks: usize,
    /// Set once `finalize_static_space_map` has run.
    finalized: bool,
    /// Maps each chunk index to the descriptor of the space that owns it.
    descriptor_map: Vec<SpaceDescriptor>,
    /// Running total of pages committed, across all spaces.
    cumulative_committed_pages: AtomicUsize,
}
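// SAFETY: `Map32Inner` is only mutated while holding `sync` or during single-threaded
// initialization, so sharing `Map32` across threads is sound.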
unsafe impl Send for Map32 {}
unsafe impl Sync for Map32 {}
impl Map32 {
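    /// Create a map sized for `vm_layout().max_chunks()` chunks, with nothing allocated yet.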
pub fn new() -> Self {
let max_chunks = vm_layout().max_chunks();
Map32 {
inner: UnsafeCell::new(Map32Inner {
prev_link: vec![0; max_chunks],
next_link: vec![0; max_chunks],
region_map: IntArrayFreeList::new(max_chunks, max_chunks as _, 1),
global_page_map: IntArrayFreeList::new(1, 1, MAX_SPACES),
shared_discontig_fl_count: 0,
total_available_discontiguous_chunks: 0,
finalized: false,
descriptor_map: vec![SpaceDescriptor::UNINITIALIZED; max_chunks],
cumulative_committed_pages: AtomicUsize::new(0),
}),
sync: Mutex::new(()),
}
}
}
impl std::ops::Deref for Map32 {
type Target = Map32Inner;
fn deref(&self) -> &Self::Target {
unsafe { &*self.inner.get() }
}
}
impl VMMap for Map32 {
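    // Mark every chunk in `[start, start + extent)` as owned by `descriptor`. This mutates the
    // inner state without taking `sync`; callers must either hold the lock (as
    // `allocate_contiguous_chunks` does) or guarantee single-threaded access.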
fn insert(&self, start: Address, extent: usize, descriptor: SpaceDescriptor) {
let self_mut: &mut Map32Inner = unsafe { self.mut_self() };
let mut e = 0;
while e < extent {
let index = (start + e).chunk_index();
assert!(
self.descriptor_map[index].is_empty(),
"Conflicting virtual address request"
);
debug!(
"Set descriptor {:?} for Chunk {}",
descriptor,
conversions::chunk_index_to_address(index)
);
self_mut.descriptor_map[index] = descriptor;
e += BYTES_IN_CHUNK;
}
}
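    // Create a per-space page free list as a child of `global_page_map`, using the next shared
    // free-list ordinal. The space displacement is always 0 for `Map32`.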
fn create_freelist(&self, _start: Address) -> CreateFreeListResult {
let free_list = Box::new(IntArrayFreeList::from_parent(
&self.global_page_map,
self.get_discontig_freelist_pr_ordinal() as _,
));
CreateFreeListResult {
free_list,
space_displacement: 0,
}
}
fn create_parent_freelist(
&self,
_start: Address,
units: usize,
grain: i32,
) -> CreateFreeListResult {
let free_list = Box::new(IntArrayFreeList::new(units, grain, 1));
CreateFreeListResult {
free_list,
space_displacement: 0,
}
}
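    // Allocate `chunks` contiguous chunks from the region free list, tag them with `descriptor`,
    // and link the new region onto the region list headed by `head`. Returns a zero address if
    // the request cannot be satisfied.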
unsafe fn allocate_contiguous_chunks(
&self,
descriptor: SpaceDescriptor,
chunks: usize,
head: Address,
_maybe_freelist: Option<&mut dyn FreeList>,
) -> Address {
let (_sync, self_mut) = self.mut_self_with_sync();
let chunk = self_mut.region_map.alloc(chunks as _);
debug_assert!(chunk != 0);
if chunk == -1 {
return Address::zero();
}
self_mut.total_available_discontiguous_chunks -= chunks;
let rtn = conversions::chunk_index_to_address(chunk as _);
self.insert(rtn, chunks << LOG_BYTES_IN_CHUNK, descriptor);
if head.is_zero() {
debug_assert!(self.next_link[chunk as usize] == 0);
} else {
self_mut.next_link[chunk as usize] = head.chunk_index() as _;
self_mut.prev_link[head.chunk_index()] = chunk;
}
debug_assert!(self.prev_link[chunk as usize] == 0);
rtn
}
fn get_next_contiguous_region(&self, start: Address) -> Address {
debug_assert!(start == conversions::chunk_align_down(start));
let chunk = start.chunk_index();
if chunk == 0 || self.next_link[chunk] == 0 {
unsafe { Address::zero() }
} else {
let a = self.next_link[chunk];
conversions::chunk_index_to_address(a as _)
}
}
fn get_contiguous_region_chunks(&self, start: Address) -> usize {
debug_assert!(start == conversions::chunk_align_down(start));
let chunk = start.chunk_index();
self.region_map.size(chunk as i32) as _
}
fn get_contiguous_region_size(&self, start: Address) -> usize {
self.get_contiguous_region_chunks(start) << LOG_BYTES_IN_CHUNK
}
fn get_available_discontiguous_chunks(&self) -> usize {
self.total_available_discontiguous_chunks
}
fn get_chunk_consumer_count(&self) -> usize {
self.shared_discontig_fl_count
}
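    // Free every region on the region list that `any_chunk` belongs to, walking the `next_link`
    // chain, then the `prev_link` chain, and finally freeing `any_chunk`'s own region.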
#[allow(clippy::while_immutable_condition)]
fn free_all_chunks(&self, any_chunk: Address) {
debug!("free_all_chunks: {}", any_chunk);
let (_sync, self_mut) = self.mut_self_with_sync();
debug_assert!(any_chunk == conversions::chunk_align_down(any_chunk));
if !any_chunk.is_zero() {
let chunk = any_chunk.chunk_index();
while self_mut.next_link[chunk] != 0 {
let x = self_mut.next_link[chunk];
self.free_contiguous_chunks_no_lock(x);
}
while self_mut.prev_link[chunk] != 0 {
let x = self_mut.prev_link[chunk];
self.free_contiguous_chunks_no_lock(x);
}
self.free_contiguous_chunks_no_lock(chunk as _);
}
}
unsafe fn free_contiguous_chunks(&self, start: Address) -> usize {
debug!("free_contiguous_chunks: {}", start);
let (_sync, _) = self.mut_self_with_sync();
debug_assert!(start == conversions::chunk_align_down(start));
let chunk = start.chunk_index();
self.free_contiguous_chunks_no_lock(chunk as _)
}
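    // Finalize the discontiguous range `[from, to]`: resize the global page map, block out the
    // chunks below and above the range in the region map, then return every chunk in the range
    // to the free list and populate the global page map for it.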
fn finalize_static_space_map(
&self,
from: Address,
to: Address,
on_discontig_start_determined: &mut dyn FnMut(Address),
) {
let self_mut: &mut Map32Inner = unsafe { self.mut_self() };
let start_address = from;
let first_chunk = start_address.chunk_index();
let last_chunk = to.chunk_index();
let unavail_start_chunk = last_chunk + 1;
let trailing_chunks = vm_layout().max_chunks() - unavail_start_chunk;
let pages = (1 + last_chunk - first_chunk) * PAGES_IN_CHUNK;
self_mut.global_page_map.resize_freelist(pages, pages as _);
on_discontig_start_determined(start_address);
        /* Set up the region map free list. */
        self_mut.region_map.alloc(first_chunk as _); // block out the bottom of the address range
        for _ in first_chunk..=last_chunk {
self_mut.region_map.alloc(1);
}
let alloced_chunk = self_mut.region_map.alloc(trailing_chunks as _);
debug_assert!(
alloced_chunk == unavail_start_chunk as i32,
"{} != {}",
alloced_chunk,
unavail_start_chunk
);
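        /* Set up the global page map and place chunks on the free list. */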
let mut first_page = 0;
for chunk_index in first_chunk..=last_chunk {
self_mut.total_available_discontiguous_chunks += 1;
            self_mut.region_map.free(chunk_index as _, false); // put this chunk on the free list
            self_mut.global_page_map.set_uncoalescable(first_page);
            let alloced_pages = self_mut.global_page_map.alloc(PAGES_IN_CHUNK as _); // populate the global page map
            debug_assert!(alloced_pages == first_page);
first_page += PAGES_IN_CHUNK as i32;
}
self_mut.finalized = true;
}
fn is_finalized(&self) -> bool {
self.finalized
}
fn get_descriptor_for_address(&self, address: Address) -> SpaceDescriptor {
let index = address.chunk_index();
self.descriptor_map[index]
}
fn add_to_cumulative_committed_pages(&self, pages: usize) {
self.cumulative_committed_pages
.fetch_add(pages, Ordering::Relaxed);
}
}
impl Map32 {
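    /// Get a mutable reference to the inner state.
    ///
    /// # Safety
    ///
    /// The caller must guarantee exclusive access to the inner state, either by holding `sync`
    /// (see `mut_self_with_sync`) or by running before any concurrent use of the map.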
#[allow(clippy::mut_from_ref)]
unsafe fn mut_self(&self) -> &mut Map32Inner {
&mut *self.inner.get()
}
fn mut_self_with_sync(&self) -> (MutexGuard<()>, &mut Map32Inner) {
let guard = self.sync.lock().unwrap();
(guard, unsafe { self.mut_self() })
}
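    /// Return the region starting at `chunk` to the region free list, unlink it from its space's
    /// region list, and clear the descriptor map and SFT entries for every chunk it covered.
    /// Returns the number of chunks freed. Callers are responsible for holding `sync`.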
fn free_contiguous_chunks_no_lock(&self, chunk: i32) -> usize {
unsafe {
let chunks = self.mut_self().region_map.free(chunk, false);
self.mut_self().total_available_discontiguous_chunks += chunks as usize;
let next = self.next_link[chunk as usize];
let prev = self.prev_link[chunk as usize];
if next != 0 {
self.mut_self().prev_link[next as usize] = prev
};
if prev != 0 {
self.mut_self().next_link[prev as usize] = next
};
self.mut_self().prev_link[chunk as usize] = 0;
self.mut_self().next_link[chunk as usize] = 0;
for offset in 0..chunks {
let index = (chunk + offset) as usize;
let chunk_start = conversions::chunk_index_to_address(index);
debug!("Clear descriptor for Chunk {}", chunk_start);
self.mut_self().descriptor_map[index] = SpaceDescriptor::UNINITIALIZED;
SFT_MAP.clear(chunk_start);
}
chunks as _
}
}
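    /// Hand out the next ordinal (1-based) for a free list derived from `global_page_map`.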
fn get_discontig_freelist_pr_ordinal(&self) -> usize {
let self_mut: &mut Map32Inner = unsafe { self.mut_self() };
self_mut.shared_discontig_fl_count += 1;
self.shared_discontig_fl_count
}
}
impl Default for Map32 {
fn default() -> Self {
Self::new()
}
}