mmtk/util/heap/layout/mmapper/csm/byte_map_storage.rs

use super::MapState;
use crate::util::heap::layout::mmapper::csm::ChunkRange;
use crate::util::heap::layout::mmapper::csm::MapStateStorage;
use crate::util::rust_util::rev_group::RevisitableGroupByForIterator;
use crate::util::Address;

use crate::util::heap::layout::vm_layout::*;
use std::fmt;
use std::sync::atomic::Ordering;
use std::sync::Mutex;

use atomic::Atomic;
use std::io::Result;

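// One `MapState` entry is kept per chunk over a fixed mappable address range;
// with `LOG_MAPPABLE_BYTES = 32`, the byte map spans 4 GiB.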
const LOG_MAPPABLE_BYTES: usize = 32;

const MMAP_NUM_CHUNKS: usize = 1 << (LOG_MAPPABLE_BYTES - LOG_BYTES_IN_CHUNK);

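/// A `MapStateStorage` implementation backed by a flat array ("byte map") with
/// one atomic `MapState` slot per chunk of the mappable range.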
pub struct ByteMapStateStorage {
    lock: Mutex<()>,
    mapped: [Atomic<MapState>; MMAP_NUM_CHUNKS],
}

impl fmt::Debug for ByteMapStateStorage {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "ByteMapStateStorage({})", MMAP_NUM_CHUNKS)
    }
}

impl MapStateStorage for ByteMapStateStorage {
    fn log_mappable_bytes(&self) -> u8 {
        LOG_MAPPABLE_BYTES as u8
    }

    fn get_state(&self, chunk: Address) -> MapState {
        let index = chunk >> LOG_BYTES_IN_CHUNK;
        // Chunks beyond the mappable range are reported as unmapped.
        let Some(slot) = self.mapped.get(index) else {
            return MapState::Unmapped;
        };
        slot.load(Ordering::Relaxed)
    }

    fn bulk_set_state(&self, range: ChunkRange, state: MapState) {
        if range.is_empty() {
            return;
        }

        let index_start = range.start >> LOG_BYTES_IN_CHUNK;
        let index_limit = range.limit() >> LOG_BYTES_IN_CHUNK;
        for index in index_start..index_limit {
            self.mapped[index].store(state, Ordering::Relaxed);
        }
    }

    fn bulk_transition_state<F>(&self, range: ChunkRange, mut update_fn: F) -> Result<()>
    where
        F: FnMut(ChunkRange, MapState) -> Result<Option<MapState>>,
    {
        if range.is_empty() {
            return Ok(());
        }

        // Fast path: a single-chunk range needs no grouping.
        if range.is_single_chunk() {
            let chunk = range.start;
            let index = chunk >> LOG_BYTES_IN_CHUNK;
            let slot: &Atomic<MapState> = &self.mapped[index];
            let state = slot.load(Ordering::Relaxed);
            if let Some(new_state) = update_fn(range, state)? {
                slot.store(new_state, Ordering::Relaxed);
            }
            return Ok(());
        }

        let index_start = range.start >> LOG_BYTES_IN_CHUNK;
        let index_limit = range.limit() >> LOG_BYTES_IN_CHUNK;

        // Visit maximal runs of chunks that currently share the same state,
        // call `update_fn` once per run, and write back the new state if any.
        let mut group_start = index_start;
        for group in self.mapped.as_slice()[index_start..index_limit]
            .iter()
            .revisitable_group_by(|s| s.load(Ordering::Relaxed))
        {
            let state = group.key;
            let group_end = group_start + group.len;
            let group_start_addr =
                unsafe { Address::from_usize(group_start << LOG_BYTES_IN_CHUNK) };
            let group_bytes = group.len << LOG_BYTES_IN_CHUNK;
            let group_range = ChunkRange::new_aligned(group_start_addr, group_bytes);
            if let Some(new_state) = update_fn(group_range, state)? {
                for index in group_start..group_end {
                    self.mapped[index].store(new_state, Ordering::Relaxed);
                }
            }
            group_start = group_end;
        }

        Ok(())
    }
}

impl ByteMapStateStorage {
    pub fn new() -> Self {
        ByteMapStateStorage {
            lock: Mutex::new(()),
            mapped: [const { Atomic::new(MapState::Unmapped) }; MMAP_NUM_CHUNKS],
        }
    }
}

impl Default for ByteMapStateStorage {
    fn default() -> Self {
        Self::new()
    }
}
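// ---------------------------------------------------------------------------
// Minimal usage sketch (illustrative, not from the original file). It assumes
// that `MapState` has a `Mapped` variant alongside `Unmapped`, and that
// `ChunkRange::new_aligned(start, bytes)` takes a chunk-aligned start address
// and a length in bytes, as the call site in `bulk_transition_state` suggests.
// ---------------------------------------------------------------------------
#[cfg(test)]
mod usage_sketch {
    use super::*;

    // Four chunks starting at a chunk-aligned address inside the 4 GiB range.
    fn test_range() -> ChunkRange {
        let start = unsafe { Address::from_usize(0x1000_0000) };
        ChunkRange::new_aligned(start, 4 << LOG_BYTES_IN_CHUNK)
    }

    #[test]
    fn bulk_set_and_transition() {
        let storage = ByteMapStateStorage::new();
        let start = unsafe { Address::from_usize(0x1000_0000) };

        // Every chunk starts out unmapped.
        assert!(matches!(storage.get_state(start), MapState::Unmapped));

        // Mark four chunks as mapped (assumes a `MapState::Mapped` variant).
        storage.bulk_set_state(test_range(), MapState::Mapped);
        assert!(matches!(storage.get_state(start), MapState::Mapped));

        // `bulk_transition_state` calls the closure once per run of chunks
        // sharing the same state; returning `Some(..)` writes the new state
        // back for the whole run, while `None` leaves it unchanged.
        storage
            .bulk_transition_state(test_range(), |_group, state| {
                assert!(matches!(state, MapState::Mapped));
                Ok(Some(MapState::Unmapped))
            })
            .unwrap();
        assert!(matches!(storage.get_state(start), MapState::Unmapped));
    }
}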