mmtk/util/heap/layout/mmapper/csm/mod.rs
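
//! This module implements `ChunkStateMmapper`, a `Mmapper` that tracks the mapping state of the
//! address space at chunk granularity. Each chunk is recorded as `Unmapped`, `Quarantined` or
//! `Mapped` in a `MapStateStorage`: a flat byte map on 32-bit targets, or a two-level table on
//! 64-bit targets.
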
use crate::util::constants::LOG_BYTES_IN_PAGE;
use crate::util::conversions::raw_is_aligned;
use crate::util::heap::layout::vm_layout::*;
use crate::util::heap::layout::Mmapper;
use crate::util::os::*;
use crate::util::Address;
use bytemuck::NoUninit;
use std::sync::Mutex;

mod byte_map_storage;
#[cfg(target_pointer_width = "64")]
mod two_level_storage;

#[cfg(target_pointer_width = "32")]
type ChosenMapStateStorage = byte_map_storage::ByteMapStateStorage;
#[cfg(target_pointer_width = "64")]
type ChosenMapStateStorage = two_level_storage::TwoLevelStateStorage;

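/// A contiguous, chunk-aligned range of address space: a chunk-aligned start address and a size
/// in bytes that is a multiple of `BYTES_IN_CHUNK`.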
#[derive(Clone, Copy)]
struct ChunkRange {
    start: Address,
    bytes: usize,
}

impl ChunkRange {
    fn new_aligned(start: Address, bytes: usize) -> Self {
        debug_assert!(
            start.is_aligned_to(BYTES_IN_CHUNK),
            "start {start} is not chunk-aligned"
        );
        debug_assert!(
            raw_is_aligned(bytes, BYTES_IN_CHUNK),
            "bytes 0x{bytes:x} is not a multiple of the chunk size"
        );
        Self { start, bytes }
    }

    fn new_unaligned(start: Address, bytes: usize) -> Self {
        let start_aligned = start.align_down(BYTES_IN_CHUNK);
        let end_aligned = (start + bytes).align_up(BYTES_IN_CHUNK);
        Self::new_aligned(start_aligned, end_aligned - start_aligned)
    }

    fn limit(&self) -> Address {
        self.start + self.bytes
    }

    fn is_within_limit(&self, limit: Address) -> bool {
        self.limit() <= limit
    }

    fn is_empty(&self) -> bool {
        self.bytes == 0
    }

    fn is_single_chunk(&self) -> bool {
        self.bytes == BYTES_IN_CHUNK
    }
}

impl std::fmt::Display for ChunkRange {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}-{}", self.start, self.limit())
    }
}

/// Storage for the per-chunk `MapState` table. Implementations only record states;
/// `ChunkStateMmapper` is responsible for the actual mmap calls.
trait MapStateStorage {
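    /// The log2 of the number of bytes of address space whose chunk states this storage can
    /// record.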
    fn log_mappable_bytes(&self) -> u8;

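    /// Return the recorded `MapState` of the chunk containing `chunk`.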
    fn get_state(&self, chunk: Address) -> MapState;

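    /// Set the recorded state of every chunk in `range` to `state`. This is pure bookkeeping;
    /// it performs no mmap calls.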
    fn bulk_set_state(&self, range: ChunkRange, state: MapState);

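    /// Transition the states of the chunks in `range`. `update_fn` is called for each group of
    /// consecutive chunks that currently share the same state; returning `Ok(Some(new_state))`
    /// records `new_state` for that group, `Ok(None)` leaves it unchanged, and an `Err` is
    /// propagated to the caller.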
    fn bulk_transition_state<F>(&self, range: ChunkRange, update_fn: F) -> MmapResult<()>
    where
        F: FnMut(ChunkRange, MapState) -> MmapResult<Option<MapState>>;
}

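/// A `Mmapper` that records the mapping state of every chunk and only issues mmap calls for
/// chunks whose state actually needs to change.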
pub struct ChunkStateMmapper {
    /// Serializes all state transitions (and the mmap calls made while holding it).
    transition_lock: Mutex<()>,
    /// The per-chunk `MapState` table.
    storage: ChosenMapStateStorage,
}

impl ChunkStateMmapper {
    pub fn new() -> Self {
        Self {
            transition_lock: Default::default(),
            storage: ChosenMapStateStorage::new(),
        }
    }

    #[cfg(test)]
    fn get_state(&self, chunk: Address) -> MapState {
        self.storage.get_state(chunk)
    }
}

impl Mmapper for ChunkStateMmapper {
    fn log_granularity(&self) -> u8 {
        LOG_BYTES_IN_CHUNK as u8
    }

    fn log_mappable_bytes(&self) -> u8 {
        self.storage.log_mappable_bytes()
    }

    fn mark_as_mapped(&self, start: Address, bytes: usize) {
        let _guard = self.transition_lock.lock().unwrap();

        let range = ChunkRange::new_unaligned(start, bytes);
        self.storage.bulk_set_state(range, MapState::Mapped);
    }

    fn quarantine_address_range(
        &self,
        start: Address,
        pages: usize,
        huge_page_option: HugePageSupport,
        anno: &MmapAnnotation,
    ) -> MmapResult<()> {
        let _guard = self.transition_lock.lock().unwrap();

        let bytes = pages << LOG_BYTES_IN_PAGE;
        let range = ChunkRange::new_unaligned(start, bytes);

        self.storage
            .bulk_transition_state(range, |group_range, state| {
                let group_start: Address = group_range.start;
                let group_bytes = group_range.bytes;

                match state {
                    MapState::Unmapped => {
                        trace!("Trying to quarantine {group_range}");
                        let mmap_strategy = MmapStrategy::QUARANTINE.huge_page(huge_page_option);
                        OS::dzmmap(group_start, group_bytes, mmap_strategy, anno)?;
                        Ok(Some(MapState::Quarantined))
                    }
                    MapState::Quarantined => {
                        trace!("Already quarantined {group_range}");
                        Ok(None)
                    }
                    MapState::Mapped => {
                        trace!("Already mapped {group_range}");
                        Ok(None)
                    }
                }
            })
    }

    fn quarantine_address_range_anywhere(
        &self,
        pages: usize,
        huge_page_option: HugePageSupport,
        anno: &MmapAnnotation,
    ) -> std::io::Result<Address> {
        let _guard = self.transition_lock.lock().unwrap();

        let bytes = pages << LOG_BYTES_IN_PAGE;
        let align = BYTES_IN_CHUNK;
        let mmap_strategy = MmapStrategy::QUARANTINE.huge_page(huge_page_option);
        let start = OS::dzmmap_anywhere(bytes, align, mmap_strategy, anno)?;
        let range = ChunkRange::new_aligned(start, bytes);

        let log_mappable = self.storage.log_mappable_bytes() as u32;
        let mappable_limit = if log_mappable < usize::BITS {
            unsafe { Address::from_usize(1usize << log_mappable) }
        } else {
            Address::MAX
        };
        if !range.is_within_limit(mappable_limit) {
            // The storage cannot record states for this range, so undo the mapping before
            // reporting the error.
            let _ = OS::munmap(start, bytes);
            return Err(std::io::Error::other(
                "quarantined side metadata range is outside the mappable address space",
            ));
        }

        self.storage.bulk_set_state(range, MapState::Quarantined);
        Ok(start)
    }

    fn ensure_mapped(
        &self,
        start: Address,
        pages: usize,
        huge_page_option: HugePageSupport,
        prot: MmapProtection,
        anno: &MmapAnnotation,
    ) -> MmapResult<()> {
        let _guard = self.transition_lock.lock().unwrap();

        let bytes = pages << LOG_BYTES_IN_PAGE;
        let range = ChunkRange::new_unaligned(start, bytes);

        let mmap_strategy = MmapStrategy::default()
            .huge_page(huge_page_option)
            .prot(prot)
            .reserve(true);

        self.storage
            .bulk_transition_state(range, |group_range, state| {
                let group_start: Address = group_range.start;
                let group_bytes = group_range.bytes;

                match state {
                    MapState::Unmapped => {
                        // Nothing is mapped here yet; create a new mapping.
                        OS::dzmmap(group_start, group_bytes, mmap_strategy.replace(false), anno)?;
                        Ok(Some(MapState::Mapped))
                    }
                    MapState::Quarantined => {
                        // A quarantined mapping already exists here; replace it.
                        OS::dzmmap(group_start, group_bytes, mmap_strategy.replace(true), anno)?;
                        Ok(Some(MapState::Mapped))
                    }
                    MapState::Mapped => Ok(None),
                }
            })
    }

    fn is_mapped_address(&self, addr: Address) -> bool {
        self.storage.get_state(addr) == MapState::Mapped
    }
}

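/// The mapping state of a chunk, as recorded by the `MapStateStorage`.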
#[repr(u8)]
#[derive(Copy, Clone, PartialEq, Eq, Debug, NoUninit)]
enum MapState {
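    /// This mmapper has neither mapped nor quarantined the chunk.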
    Unmapped,
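    /// The chunk's address range has been reserved (mmapped with the quarantine strategy) but
    /// not yet mapped for general use.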
    Quarantined,
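    /// The chunk is mapped and ready to be accessed.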
    Mapped,
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::mmap_anno_test;
    use crate::util::constants::LOG_BYTES_IN_PAGE;
    use crate::util::test_util::CHUNK_STATE_MMAPPER_TEST_REGION;
    use crate::util::test_util::{serial_test, with_cleanup};
    use crate::util::{conversions, Address};

    const FIXED_ADDRESS: Address = CHUNK_STATE_MMAPPER_TEST_REGION.start;
    const MAX_BYTES: usize = CHUNK_STATE_MMAPPER_TEST_REGION.size;

    /// The number of chunks needed to cover `pages` pages, rounded up.
    fn pages_to_chunks_up(pages: usize) -> usize {
        conversions::raw_align_up(pages << LOG_BYTES_IN_PAGE, BYTES_IN_CHUNK) / BYTES_IN_CHUNK
    }

    fn get_chunk_map_state(mmapper: &ChunkStateMmapper, chunk: Address) -> MapState {
        assert!(chunk.is_aligned_to(BYTES_IN_CHUNK));
        mmapper.get_state(chunk)
    }

    #[test]
    fn ensure_mapped_1page() {
        serial_test(|| {
            let pages = 1;
            with_cleanup(
                || {
                    let mmapper = ChunkStateMmapper::new();
                    mmapper
                        .ensure_mapped(
                            FIXED_ADDRESS,
                            pages,
                            HugePageSupport::No,
                            MmapProtection::ReadWrite,
                            mmap_anno_test!(),
                        )
                        .unwrap();

                    let chunks = pages_to_chunks_up(pages);
                    for i in 0..chunks {
                        assert_eq!(
                            get_chunk_map_state(
                                &mmapper,
                                FIXED_ADDRESS + (i << LOG_BYTES_IN_CHUNK)
                            ),
                            MapState::Mapped
                        );
                    }
                },
                || {
                    OS::munmap(FIXED_ADDRESS, MAX_BYTES).unwrap();
                },
            )
        })
    }

    #[test]
    fn ensure_mapped_1chunk() {
        serial_test(|| {
            let pages = BYTES_IN_CHUNK >> LOG_BYTES_IN_PAGE as usize;
            with_cleanup(
                || {
                    let mmapper = ChunkStateMmapper::new();
                    mmapper
                        .ensure_mapped(
                            FIXED_ADDRESS,
                            pages,
                            HugePageSupport::No,
                            MmapProtection::ReadWrite,
                            mmap_anno_test!(),
                        )
                        .unwrap();

                    let chunks = pages_to_chunks_up(pages);
                    for i in 0..chunks {
                        assert_eq!(
                            get_chunk_map_state(
                                &mmapper,
                                FIXED_ADDRESS + (i << LOG_BYTES_IN_CHUNK)
                            ),
                            MapState::Mapped
                        );
                    }
                },
                || {
                    OS::munmap(FIXED_ADDRESS, MAX_BYTES).unwrap();
                },
            )
        })
    }

    #[test]
    fn ensure_mapped_more_than_1chunk() {
        serial_test(|| {
            let pages = (BYTES_IN_CHUNK + BYTES_IN_CHUNK / 2) >> LOG_BYTES_IN_PAGE as usize;
            with_cleanup(
                || {
                    let mmapper = ChunkStateMmapper::new();
                    mmapper
                        .ensure_mapped(
                            FIXED_ADDRESS,
                            pages,
                            HugePageSupport::No,
                            MmapProtection::ReadWrite,
                            mmap_anno_test!(),
                        )
                        .unwrap();

                    let chunks = pages_to_chunks_up(pages);
                    for i in 0..chunks {
                        assert_eq!(
                            get_chunk_map_state(
                                &mmapper,
                                FIXED_ADDRESS + (i << LOG_BYTES_IN_CHUNK)
                            ),
                            MapState::Mapped
                        );
                    }
                },
                || {
                    OS::munmap(FIXED_ADDRESS, MAX_BYTES).unwrap();
                },
            )
        })
    }
}