mmtk/util/heap/layout/mmapper/csm/mod.rs
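
//! An [`Mmapper`] implementation that tracks the mapping state of address space at chunk
//! granularity: each chunk is either unmapped, quarantined (reserved but not yet usable),
//! or mapped. See [`MapState`].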
use crate::util::constants::LOG_BYTES_IN_PAGE;
use crate::util::conversions::raw_is_aligned;
use crate::util::heap::layout::vm_layout::*;
use crate::util::heap::layout::Mmapper;
use crate::util::memory::*;
use crate::util::Address;
use bytemuck::NoUninit;
use std::{io::Result, sync::Mutex};

mod byte_map_storage;
#[cfg(target_pointer_width = "64")]
mod two_level_storage;

#[cfg(target_pointer_width = "32")]
type ChosenMapStateStorage = byte_map_storage::ByteMapStateStorage;
#[cfg(target_pointer_width = "64")]
type ChosenMapStateStorage = two_level_storage::TwoLevelStateStorage;

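/// A range of chunks, identified by a chunk-aligned start address and a size in bytes that
/// is a whole multiple of the chunk size.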
#[derive(Clone, Copy)]
struct ChunkRange {
    start: Address,
    bytes: usize,
}

impl ChunkRange {
    fn new_aligned(start: Address, bytes: usize) -> Self {
        debug_assert!(
            start.is_aligned_to(BYTES_IN_CHUNK),
            "start {start} is not chunk-aligned"
        );
        debug_assert!(
            raw_is_aligned(bytes, BYTES_IN_CHUNK),
            "bytes 0x{bytes:x} is not a multiple of the chunk size"
        );
        Self { start, bytes }
    }

    fn new_unaligned(start: Address, bytes: usize) -> Self {
        let start_aligned = start.align_down(BYTES_IN_CHUNK);
        let end_aligned = (start + bytes).align_up(BYTES_IN_CHUNK);
        Self::new_aligned(start_aligned, end_aligned - start_aligned)
    }

    fn limit(&self) -> Address {
        self.start + self.bytes
    }

    fn is_within_limit(&self, limit: Address) -> bool {
        self.limit() <= limit
    }

    fn is_empty(&self) -> bool {
        self.bytes == 0
    }

    fn is_single_chunk(&self) -> bool {
        self.bytes == BYTES_IN_CHUNK
    }
}

impl std::fmt::Display for ChunkRange {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}-{}", self.start, self.limit())
    }
}

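/// Storage for the per-chunk [`MapState`] table. A byte map is used on 32-bit targets and a
/// two-level table on 64-bit targets (see `ChosenMapStateStorage` above).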
trait MapStateStorage {
    /// The log2 of the number of bytes of address space this storage can cover.
    fn log_mappable_bytes(&self) -> u8;

    /// Get the state of the chunk containing the address `chunk`.
    fn get_state(&self, chunk: Address) -> MapState;

    /// Set the state of every chunk in `range` to `state`.
    fn bulk_set_state(&self, range: ChunkRange, state: MapState);

    /// Walk `range`, calling `update_fn` on sub-ranges of chunks that currently share the same
    /// state. If `update_fn` returns `Ok(Some(new_state))`, the chunks in that sub-range are set
    /// to `new_state`; `Ok(None)` leaves them unchanged; errors are propagated to the caller.
    fn bulk_transition_state<F>(&self, range: ChunkRange, update_fn: F) -> Result<()>
    where
        F: FnMut(ChunkRange, MapState) -> Result<Option<MapState>>;
}

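/// An [`Mmapper`] that operates at chunk granularity, keeping one [`MapState`] per chunk.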
pub struct ChunkStateMmapper {
    /// Serializes state transitions so that concurrent `mark_as_mapped`,
    /// `quarantine_address_range` and `ensure_mapped` calls do not race on the same chunks.
    transition_lock: Mutex<()>,
    /// The per-chunk state table.
    storage: ChosenMapStateStorage,
}

impl ChunkStateMmapper {
    pub fn new() -> Self {
        Self {
            transition_lock: Default::default(),
            storage: ChosenMapStateStorage::new(),
        }
    }

    #[cfg(test)]
    fn get_state(&self, chunk: Address) -> MapState {
        self.storage.get_state(chunk)
    }
}

impl Mmapper for ChunkStateMmapper {
    fn log_granularity(&self) -> u8 {
        LOG_BYTES_IN_CHUNK as u8
    }

    fn log_mappable_bytes(&self) -> u8 {
        self.storage.log_mappable_bytes()
    }

    fn mark_as_mapped(&self, start: Address, bytes: usize) {
        let _guard = self.transition_lock.lock().unwrap();

        let range = ChunkRange::new_unaligned(start, bytes);
        self.storage.bulk_set_state(range, MapState::Mapped);
    }

    fn quarantine_address_range(
        &self,
        start: Address,
        pages: usize,
        strategy: MmapStrategy,
        anno: &MmapAnnotation,
    ) -> Result<()> {
        let _guard = self.transition_lock.lock().unwrap();

        let bytes = pages << LOG_BYTES_IN_PAGE;
        let range = ChunkRange::new_unaligned(start, bytes);

        self.storage
            .bulk_transition_state(range, |group_range, state| {
                let group_start: Address = group_range.start;
                let group_bytes = group_range.bytes;

                match state {
                    MapState::Unmapped => {
                        trace!("Trying to quarantine {group_range}");
                        // Reserve the address range without committing backing memory.
                        mmap_noreserve(group_start, group_bytes, strategy, anno)?;
                        Ok(Some(MapState::Quarantined))
                    }
                    MapState::Quarantined => {
                        trace!("Already quarantined {group_range}");
                        Ok(None)
                    }
                    MapState::Mapped => {
                        trace!("Already mapped {group_range}");
                        Ok(None)
                    }
                }
            })
    }

    fn ensure_mapped(
        &self,
        start: Address,
        pages: usize,
        strategy: MmapStrategy,
        anno: &MmapAnnotation,
    ) -> Result<()> {
        let _guard = self.transition_lock.lock().unwrap();

        let bytes = pages << LOG_BYTES_IN_PAGE;
        let range = ChunkRange::new_unaligned(start, bytes);

        self.storage
            .bulk_transition_state(range, |group_range, state| {
                let group_start: Address = group_range.start;
                let group_bytes = group_range.bytes;

                match state {
                    MapState::Unmapped => {
                        // Nothing is mapped here yet: map fresh zeroed memory.
                        dzmmap_noreplace(group_start, group_bytes, strategy, anno)?;
                        Ok(Some(MapState::Mapped))
                    }
                    MapState::Quarantined => {
                        // The range was reserved by quarantine_address_range; map over the
                        // existing reservation, hence the non-`noreplace` (unsafe) variant.
                        unsafe { dzmmap(group_start, group_bytes, strategy, anno) }?;
                        Ok(Some(MapState::Mapped))
                    }
                    MapState::Mapped => Ok(None),
                }
            })
    }

    fn is_mapped_address(&self, addr: Address) -> bool {
        self.storage.get_state(addr) == MapState::Mapped
    }
}

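/// The mapping state of a chunk.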
#[repr(u8)]
#[derive(Copy, Clone, PartialEq, Eq, Debug, NoUninit)]
enum MapState {
    /// The chunk is not mapped by this mmapper.
    Unmapped,
    /// The address range has been reserved (mmapped without reserve) but is not yet usable.
    Quarantined,
    /// The chunk is mapped and ready to be accessed.
    Mapped,
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::mmap_anno_test;
    use crate::util::constants::LOG_BYTES_IN_PAGE;
    use crate::util::memory;
    use crate::util::test_util::CHUNK_STATE_MMAPPER_TEST_REGION;
    use crate::util::test_util::{serial_test, with_cleanup};
    use crate::util::{conversions, Address};

    const FIXED_ADDRESS: Address = CHUNK_STATE_MMAPPER_TEST_REGION.start;
    const MAX_BYTES: usize = CHUNK_STATE_MMAPPER_TEST_REGION.size;

    /// The number of chunks needed to cover `pages` pages, rounded up.
    fn pages_to_chunks_up(pages: usize) -> usize {
        let bytes = pages << LOG_BYTES_IN_PAGE;
        conversions::raw_align_up(bytes, BYTES_IN_CHUNK) / BYTES_IN_CHUNK
    }

    fn get_chunk_map_state(mmapper: &ChunkStateMmapper, chunk: Address) -> MapState {
        assert!(chunk.is_aligned_to(BYTES_IN_CHUNK));
        mmapper.get_state(chunk)
    }

    #[test]
    fn ensure_mapped_1page() {
        serial_test(|| {
            let pages = 1;
            with_cleanup(
                || {
                    let mmapper = ChunkStateMmapper::new();
                    mmapper
                        .ensure_mapped(FIXED_ADDRESS, pages, MmapStrategy::TEST, mmap_anno_test!())
                        .unwrap();

                    let chunks = pages_to_chunks_up(pages);
                    for i in 0..chunks {
                        assert_eq!(
                            get_chunk_map_state(
                                &mmapper,
                                FIXED_ADDRESS + (i << LOG_BYTES_IN_CHUNK)
                            ),
                            MapState::Mapped
                        );
                    }
                },
                || {
                    memory::munmap(FIXED_ADDRESS, MAX_BYTES).unwrap();
                },
            )
        })
    }

    #[test]
    fn ensure_mapped_1chunk() {
        serial_test(|| {
            let pages = BYTES_IN_CHUNK >> LOG_BYTES_IN_PAGE as usize;
            with_cleanup(
                || {
                    let mmapper = ChunkStateMmapper::new();
                    mmapper
                        .ensure_mapped(FIXED_ADDRESS, pages, MmapStrategy::TEST, mmap_anno_test!())
                        .unwrap();

                    let chunks = pages_to_chunks_up(pages);
                    for i in 0..chunks {
                        assert_eq!(
                            get_chunk_map_state(
                                &mmapper,
                                FIXED_ADDRESS + (i << LOG_BYTES_IN_CHUNK)
                            ),
                            MapState::Mapped
                        );
                    }
                },
                || {
                    memory::munmap(FIXED_ADDRESS, MAX_BYTES).unwrap();
                },
            )
        })
    }

    #[test]
    fn ensure_mapped_more_than_1chunk() {
        serial_test(|| {
            let pages = (BYTES_IN_CHUNK + BYTES_IN_CHUNK / 2) >> LOG_BYTES_IN_PAGE as usize;
            with_cleanup(
                || {
                    let mmapper = ChunkStateMmapper::new();
                    mmapper
                        .ensure_mapped(FIXED_ADDRESS, pages, MmapStrategy::TEST, mmap_anno_test!())
                        .unwrap();

                    let chunks = pages_to_chunks_up(pages);
                    for i in 0..chunks {
                        assert_eq!(
                            get_chunk_map_state(
                                &mmapper,
                                FIXED_ADDRESS + (i << LOG_BYTES_IN_CHUNK)
                            ),
                            MapState::Mapped
                        );
                    }
                },
                || {
                    memory::munmap(FIXED_ADDRESS, MAX_BYTES).unwrap();
                },
            )
        })
    }
}