mmtk/util/heap/layout/mmapper/csm/mod.rs

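//! A chunk-granularity `Mmapper` ("chunk state mmapper"). It records a
//! `MapState` for every chunk and performs the corresponding mmap calls lazily
//! when a transition (quarantine or ensure-mapped) is requested.
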
use crate::util::constants::LOG_BYTES_IN_PAGE;
use crate::util::conversions::raw_is_aligned;
use crate::util::heap::layout::vm_layout::*;
use crate::util::heap::layout::Mmapper;
use crate::util::memory::*;
use crate::util::Address;
use bytemuck::NoUninit;
use std::{io::Result, sync::Mutex};
mod byte_map_storage;
#[cfg(target_pointer_width = "64")]
mod two_level_storage;
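// Select the state storage by pointer width: a flat byte map on 32-bit targets,
// a two-level table on 64-bit targets.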
#[cfg(target_pointer_width = "32")]
type ChosenMapStateStorage = byte_map_storage::ByteMapStateStorage;
#[cfg(target_pointer_width = "64")]
type ChosenMapStateStorage = two_level_storage::TwoLevelStateStorage;
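/// A chunk-aligned range of address space, described by its start address and
/// its length in bytes.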
#[derive(Clone, Copy)]
struct ChunkRange {
start: Address,
bytes: usize,
}
impl ChunkRange {
fn new_aligned(start: Address, bytes: usize) -> Self {
debug_assert!(
start.is_aligned_to(BYTES_IN_CHUNK),
"start {start} is not chunk-aligned"
);
debug_assert!(
raw_is_aligned(bytes, BYTES_IN_CHUNK),
"bytes 0x{bytes:x} is not a multiple of chunks"
);
Self { start, bytes }
}
fn new_unaligned(start: Address, bytes: usize) -> Self {
let start_aligned = start.align_down(BYTES_IN_CHUNK);
let end_aligned = (start + bytes).align_up(BYTES_IN_CHUNK);
Self::new_aligned(start_aligned, end_aligned - start_aligned)
}
fn limit(&self) -> Address {
self.start + self.bytes
}
fn is_within_limit(&self, limit: Address) -> bool {
self.limit() <= limit
}
fn is_empty(&self) -> bool {
self.bytes == 0
}
fn is_single_chunk(&self) -> bool {
self.bytes == BYTES_IN_CHUNK
}
}
impl std::fmt::Display for ChunkRange {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}-{}", self.start, self.limit())
}
}
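/// Backing storage for per-chunk `MapState` values.
///
/// `bulk_transition_state` presents sub-ranges of the requested range, together
/// with their current state, to `update_fn`, which returns the new state, or
/// `None` to leave the sub-range unchanged; any error aborts the transition.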
trait MapStateStorage {
fn log_mappable_bytes(&self) -> u8;
fn get_state(&self, chunk: Address) -> MapState;
fn bulk_set_state(&self, range: ChunkRange, state: MapState);
fn bulk_transition_state<F>(&self, range: ChunkRange, update_fn: F) -> Result<()>
where
F: FnMut(ChunkRange, MapState) -> Result<Option<MapState>>;
}
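/// An `Mmapper` that keeps a per-chunk `MapState` in a pointer-width-specific
/// storage and serializes all state transitions behind a single lock.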
pub struct ChunkStateMmapper {
transition_lock: Mutex<()>,
storage: ChosenMapStateStorage,
}
impl ChunkStateMmapper {
pub fn new() -> Self {
Self {
transition_lock: Default::default(),
storage: ChosenMapStateStorage::new(),
}
}
#[cfg(test)]
fn get_state(&self, chunk: Address) -> MapState {
self.storage.get_state(chunk)
}
}
impl Mmapper for ChunkStateMmapper {
fn log_granularity(&self) -> u8 {
LOG_BYTES_IN_CHUNK as u8
}
fn log_mappable_bytes(&self) -> u8 {
self.storage.log_mappable_bytes()
}
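    /// Record the range as mapped without issuing any system call; the memory
    /// is assumed to have been mapped already by other means.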
fn mark_as_mapped(&self, start: Address, bytes: usize) {
let _guard = self.transition_lock.lock().unwrap();
let range = ChunkRange::new_unaligned(start, bytes);
self.storage.bulk_set_state(range, MapState::Mapped);
}
fn quarantine_address_range(
&self,
start: Address,
pages: usize,
strategy: MmapStrategy,
anno: &MmapAnnotation,
) -> Result<()> {
let _guard = self.transition_lock.lock().unwrap();
let bytes = pages << LOG_BYTES_IN_PAGE;
let range = ChunkRange::new_unaligned(start, bytes);
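        // Reserve unmapped chunks with a no-reserve mapping; chunks that are
        // already quarantined or mapped are left untouched.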
self.storage
.bulk_transition_state(range, |group_range, state| {
let group_start: Address = group_range.start;
let group_bytes = group_range.bytes;
match state {
MapState::Unmapped => {
trace!("Trying to quarantine {group_range}");
mmap_noreserve(group_start, group_bytes, strategy, anno)?;
Ok(Some(MapState::Quarantined))
}
MapState::Quarantined => {
trace!("Already quarantine {group_range}");
Ok(None)
}
MapState::Mapped => {
trace!("Already mapped {group_range}");
Ok(None)
}
}
})
}
fn ensure_mapped(
&self,
start: Address,
pages: usize,
strategy: MmapStrategy,
anno: &MmapAnnotation,
) -> Result<()> {
let _guard = self.transition_lock.lock().unwrap();
let bytes = pages << LOG_BYTES_IN_PAGE;
let range = ChunkRange::new_unaligned(start, bytes);
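        // Map unmapped chunks with a fresh demand-zero mapping, replace the
        // reservation of quarantined chunks with a real mapping, and leave
        // already-mapped chunks untouched.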
self.storage
.bulk_transition_state(range, |group_range, state| {
let group_start: Address = group_range.start;
let group_bytes = group_range.bytes;
match state {
MapState::Unmapped => {
dzmmap_noreplace(group_start, group_bytes, strategy, anno)?;
Ok(Some(MapState::Mapped))
}
MapState::Quarantined => {
unsafe { dzmmap(group_start, group_bytes, strategy, anno) }?;
Ok(Some(MapState::Mapped))
}
MapState::Mapped => Ok(None),
}
})
}
fn is_mapped_address(&self, addr: Address) -> bool {
self.storage.get_state(addr) == MapState::Mapped
}
}
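/// The mapping state of a single chunk.
///
/// `Unmapped` chunks have not been touched; `Quarantined` chunks have their
/// address range reserved (via `mmap_noreserve`) but are not yet usable;
/// `Mapped` chunks are mapped and usable.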
#[repr(u8)]
#[derive(Copy, Clone, PartialEq, Eq, Debug, NoUninit)]
enum MapState {
Unmapped,
Quarantined,
Mapped,
}
#[cfg(test)]
mod tests {
use super::*;
use crate::mmap_anno_test;
use crate::util::constants::LOG_BYTES_IN_PAGE;
use crate::util::memory;
use crate::util::test_util::CHUNK_STATE_MMAPPER_TEST_REGION;
use crate::util::test_util::{serial_test, with_cleanup};
use crate::util::{conversions, Address};
const FIXED_ADDRESS: Address = CHUNK_STATE_MMAPPER_TEST_REGION.start;
const MAX_BYTES: usize = CHUNK_STATE_MMAPPER_TEST_REGION.size;
    fn pages_to_chunks_up(pages: usize) -> usize {
        // Convert pages to bytes before rounding up to whole chunks.
        let bytes = pages << LOG_BYTES_IN_PAGE;
        conversions::raw_align_up(bytes, BYTES_IN_CHUNK) / BYTES_IN_CHUNK
    }
    fn get_chunk_map_state(mmapper: &ChunkStateMmapper, chunk: Address) -> MapState {
        debug_assert!(chunk.is_aligned_to(BYTES_IN_CHUNK));
        mmapper.get_state(chunk)
    }
#[test]
fn ensure_mapped_1page() {
serial_test(|| {
let pages = 1;
with_cleanup(
|| {
let mmapper = ChunkStateMmapper::new();
mmapper
.ensure_mapped(FIXED_ADDRESS, pages, MmapStrategy::TEST, mmap_anno_test!())
.unwrap();
let chunks = pages_to_chunks_up(pages);
for i in 0..chunks {
assert_eq!(
get_chunk_map_state(
&mmapper,
FIXED_ADDRESS + (i << LOG_BYTES_IN_CHUNK)
),
MapState::Mapped
);
}
},
|| {
memory::munmap(FIXED_ADDRESS, MAX_BYTES).unwrap();
},
)
})
}
#[test]
fn ensure_mapped_1chunk() {
serial_test(|| {
let pages = BYTES_IN_CHUNK >> LOG_BYTES_IN_PAGE as usize;
with_cleanup(
|| {
let mmapper = ChunkStateMmapper::new();
mmapper
.ensure_mapped(FIXED_ADDRESS, pages, MmapStrategy::TEST, mmap_anno_test!())
.unwrap();
let chunks = pages_to_chunks_up(pages);
for i in 0..chunks {
assert_eq!(
get_chunk_map_state(
&mmapper,
FIXED_ADDRESS + (i << LOG_BYTES_IN_CHUNK)
),
MapState::Mapped
);
}
},
|| {
memory::munmap(FIXED_ADDRESS, MAX_BYTES).unwrap();
},
)
})
}
#[test]
fn ensure_mapped_more_than_1chunk() {
serial_test(|| {
let pages = (BYTES_IN_CHUNK + BYTES_IN_CHUNK / 2) >> LOG_BYTES_IN_PAGE as usize;
with_cleanup(
|| {
let mmapper = ChunkStateMmapper::new();
mmapper
.ensure_mapped(FIXED_ADDRESS, pages, MmapStrategy::TEST, mmap_anno_test!())
.unwrap();
let chunks = pages_to_chunks_up(pages);
for i in 0..chunks {
assert_eq!(
get_chunk_map_state(
&mmapper,
FIXED_ADDRESS + (i << LOG_BYTES_IN_CHUNK)
),
MapState::Mapped
);
}
},
|| {
memory::munmap(FIXED_ADDRESS, MAX_BYTES).unwrap();
},
)
})
}
}