mmtk/util/heap/layout/mmapper/csm/mod.rs

use crate::util::constants::LOG_BYTES_IN_PAGE;
use crate::util::conversions::raw_is_aligned;
use crate::util::heap::layout::vm_layout::*;
use crate::util::heap::layout::Mmapper;
use crate::util::os::*;
use crate::util::Address;
use bytemuck::NoUninit;
use std::sync::Mutex;

mod byte_map_storage;
#[cfg(target_pointer_width = "64")]
mod two_level_storage;

#[cfg(target_pointer_width = "32")]
type ChosenMapStateStorage = byte_map_storage::ByteMapStateStorage;
#[cfg(target_pointer_width = "64")]
type ChosenMapStateStorage = two_level_storage::TwoLevelStateStorage;

/// A range of whole chunks.  Always aligned.
///
/// This type is used internally by the chunk state mmapper and its storage backends.
#[derive(Clone, Copy)]
struct ChunkRange {
    start: Address,
    bytes: usize,
}

impl ChunkRange {
    fn new_aligned(start: Address, bytes: usize) -> Self {
        debug_assert!(
            start.is_aligned_to(BYTES_IN_CHUNK),
            "start {start} is not chunk-aligned"
        );
        debug_assert!(
            raw_is_aligned(bytes, BYTES_IN_CHUNK),
            "bytes 0x{bytes:x} is not a multiple of the chunk size"
        );
        Self { start, bytes }
    }

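    /// Create a range from an arbitrary byte range by expanding it outwards to whole chunks.
    ///
    /// A minimal sketch of the rounding behavior (illustrative only; `chunk_base` is assumed to
    /// be a chunk-aligned address, and the default 4 MiB chunk size is assumed):
    ///
    /// ```ignore
    /// // A one-page range starting one page past a chunk boundary...
    /// let range = ChunkRange::new_unaligned(chunk_base + 4096usize, 4096);
    /// // ...expands to exactly the chunk that contains it.
    /// assert_eq!(range.start, chunk_base);
    /// assert!(range.is_single_chunk());
    /// ```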
    fn new_unaligned(start: Address, bytes: usize) -> Self {
        let start_aligned = start.align_down(BYTES_IN_CHUNK);
        let end_aligned = (start + bytes).align_up(BYTES_IN_CHUNK);
        Self::new_aligned(start_aligned, end_aligned - start_aligned)
    }

    fn limit(&self) -> Address {
        self.start + self.bytes
    }

    fn is_within_limit(&self, limit: Address) -> bool {
        self.limit() <= limit
    }

    fn is_empty(&self) -> bool {
        self.bytes == 0
    }

    fn is_single_chunk(&self) -> bool {
        self.bytes == BYTES_IN_CHUNK
    }
}

impl std::fmt::Display for ChunkRange {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}-{}", self.start, self.limit())
    }
}

/// The back-end storage of [`ChunkStateMmapper`].  It is responsible for holding the states of
/// each chunk (eagerly or lazily) and transitioning the states in bulk.
trait MapStateStorage {
    /// The base-2 logarithm of the size of the address space this `MapStateStorage` can handle.
    fn log_mappable_bytes(&self) -> u8;

    /// Return the state of a given `chunk` (must be chunk-aligned).
    ///
    /// Note that all chunks, including those outside the mappable address range, are logically
    /// `MapState::Unmapped` until a state is explicitly stored for them.
    fn get_state(&self, chunk: Address) -> MapState;

    /// Set all chunks within `range` to `state`.
    fn bulk_set_state(&self, range: ChunkRange, state: MapState);

    /// Visit the chunk states within `range` and allow the `update_fn` callback to inspect and
    /// change the states.
    ///
    /// It visits chunks from low to high addresses, calling `update_fn(group_range, group_state)`
    /// for each contiguous chunk range `group_range` whose chunks share the same state
    /// `group_state`.  `update_fn` can take actions accordingly and return one of three values:
    /// -   `Err(err)`: Stop visiting and return `Err(err)` from `bulk_transition_state`
    ///     immediately.
    /// -   `Ok(None)`: Continue visiting the next chunk range without changing chunk states.
    /// -   `Ok(Some(new_state))`: Set the state of all chunks within `group_range` to `new_state`.
    ///
    /// Returns `Ok(())` if it finishes visiting all chunks normally.
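    ///
    /// # Example
    ///
    /// A minimal sketch of an `update_fn` that maps every unmapped group and leaves other states
    /// alone (hypothetical; `my_map` stands in for an actual mapping call):
    ///
    /// ```ignore
    /// storage.bulk_transition_state(range, |group_range, group_state| {
    ///     match group_state {
    ///         // Map the whole contiguous group with one call, then record the new state.
    ///         MapState::Unmapped => {
    ///             my_map(group_range.start, group_range.bytes)?;
    ///             Ok(Some(MapState::Mapped))
    ///         }
    ///         // Already usable: keep visiting without changing the state.
    ///         _ => Ok(None),
    ///     }
    /// })?;
    /// ```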
    fn bulk_transition_state<F>(&self, range: ChunkRange, update_fn: F) -> MmapResult<()>
    where
        F: FnMut(ChunkRange, MapState) -> MmapResult<Option<MapState>>;
}

/// A [`Mmapper`] implementation based on a logical array of chunk states.
///
/// The [`ChunkStateMmapper::storage`] field holds the state of each chunk, while the
/// [`ChunkStateMmapper`] itself makes the actual system calls to manage the memory mapping.
///
/// As the name suggests, this implementation of [`Mmapper`] operates at the granularity of chunks.
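///
/// # Example
///
/// A minimal sketch of typical use (illustrative only; `addr` is assumed to be an address in the
/// mappable range and `anno` an `&MmapAnnotation`):
///
/// ```ignore
/// let mmapper = ChunkStateMmapper::new();
/// // Map one page; the request is rounded up to whole chunks internally.
/// mmapper.ensure_mapped(addr, 1, HugePageSupport::No, MmapProtection::ReadWrite, anno)?;
/// assert!(mmapper.is_mapped_address(addr));
/// ```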
pub struct ChunkStateMmapper {
    /// Lock for transitioning map states.
    transition_lock: Mutex<()>,
    /// This holds the [`MapState`] for each chunk.
    storage: ChosenMapStateStorage,
}

impl ChunkStateMmapper {
    pub fn new() -> Self {
        Self {
            transition_lock: Default::default(),
            storage: ChosenMapStateStorage::new(),
        }
    }

    #[cfg(test)]
    fn get_state(&self, chunk: Address) -> MapState {
        self.storage.get_state(chunk)
    }
}

impl Mmapper for ChunkStateMmapper {
    fn log_granularity(&self) -> u8 {
        LOG_BYTES_IN_CHUNK as u8
    }

    fn log_mappable_bytes(&self) -> u8 {
        self.storage.log_mappable_bytes()
    }

    fn mark_as_mapped(&self, start: Address, bytes: usize) {
        let _guard = self.transition_lock.lock().unwrap();

        let range = ChunkRange::new_unaligned(start, bytes);
        self.storage.bulk_set_state(range, MapState::Mapped);
    }

    fn quarantine_address_range(
        &self,
        start: Address,
        pages: usize,
        huge_page_option: HugePageSupport,
        anno: &MmapAnnotation,
    ) -> MmapResult<()> {
        let _guard = self.transition_lock.lock().unwrap();

        let bytes = pages << LOG_BYTES_IN_PAGE;
        let range = ChunkRange::new_unaligned(start, bytes);

        self.storage
            .bulk_transition_state(range, |group_range, state| {
                let group_start: Address = group_range.start;
                let group_bytes = group_range.bytes;

                match state {
                    MapState::Unmapped => {
                        trace!("Trying to quarantine {group_range}");
                        // Reserve the address range without committing memory: no access
                        // permissions and no swap space reservation.
                        let mmap_strategy = MmapStrategy::default()
                            .huge_page(huge_page_option)
                            .prot(MmapProtection::NoAccess)
                            .reserve(false)
                            .replace(false);
                        OS::dzmmap(group_start, group_bytes, mmap_strategy, anno)?;
                        Ok(Some(MapState::Quarantined))
                    }
                    MapState::Quarantined => {
                        trace!("Already quarantined {group_range}");
                        Ok(None)
                    }
                    MapState::Mapped => {
                        trace!("Already mapped {group_range}");
                        Ok(None)
                    }
                }
            })
    }

    fn ensure_mapped(
        &self,
        start: Address,
        pages: usize,
        huge_page_option: HugePageSupport,
        prot: MmapProtection,
        anno: &MmapAnnotation,
    ) -> MmapResult<()> {
        let _guard = self.transition_lock.lock().unwrap();

        let bytes = pages << LOG_BYTES_IN_PAGE;
        let range = ChunkRange::new_unaligned(start, bytes);

        let mmap_strategy = MmapStrategy::default()
            .huge_page(huge_page_option)
            .prot(prot)
            .reserve(true);

        self.storage
            .bulk_transition_state(range, |group_range, state| {
                let group_start: Address = group_range.start;
                let group_bytes = group_range.bytes;

                match state {
                    MapState::Unmapped => {
                        // Nothing is mapped here yet, so create a fresh mapping.
                        OS::dzmmap(group_start, group_bytes, mmap_strategy.replace(false), anno)?;
                        Ok(Some(MapState::Mapped))
                    }
                    MapState::Quarantined => {
                        // The range already has a no-access mapping from quarantining, so map
                        // over it in place.
                        OS::dzmmap(group_start, group_bytes, mmap_strategy.replace(true), anno)?;
                        Ok(Some(MapState::Mapped))
                    }
                    MapState::Mapped => Ok(None),
                }
            })
    }

    fn is_mapped_address(&self, addr: Address) -> bool {
        // `get_state` expects a chunk-aligned address, so align down first.
        self.storage.get_state(addr.align_down(BYTES_IN_CHUNK)) == MapState::Mapped
    }
}

/// The mmap state of a chunk.
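///
/// Within this module, states only move forward: an `Unmapped` chunk becomes `Quarantined` (via
/// [`Mmapper::quarantine_address_range`]) or `Mapped` (via [`Mmapper::ensure_mapped`] or
/// [`Mmapper::mark_as_mapped`]), and a `Quarantined` chunk becomes `Mapped`.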
#[repr(u8)]
#[derive(Copy, Clone, PartialEq, Eq, Debug, NoUninit)]
enum MapState {
    /// The chunk is unmapped and not managed by MMTk.
    Unmapped,
    /// The chunk is reserved for future use.  MMTk has reserved the address range (by mmapping it
    /// with `PROT_NONE` and without reserving swap space), but hasn't used it yet.
    Quarantined,
    /// The chunk is mapped by MMTk and is in use.
    Mapped,
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::mmap_anno_test;
    use crate::util::constants::LOG_BYTES_IN_PAGE;
    use crate::util::test_util::CHUNK_STATE_MMAPPER_TEST_REGION;
    use crate::util::test_util::{serial_test, with_cleanup};
    use crate::util::{conversions, Address};

    const FIXED_ADDRESS: Address = CHUNK_STATE_MMAPPER_TEST_REGION.start;
    const MAX_BYTES: usize = CHUNK_STATE_MMAPPER_TEST_REGION.size;

    /// Round a page count up to the number of chunks needed to hold it.
    fn pages_to_chunks_up(pages: usize) -> usize {
        // Convert pages to bytes before aligning: `pages` and `BYTES_IN_CHUNK` are in
        // different units.
        let bytes = pages << LOG_BYTES_IN_PAGE;
        conversions::raw_align_up(bytes, BYTES_IN_CHUNK) / BYTES_IN_CHUNK
    }

    fn get_chunk_map_state(mmapper: &ChunkStateMmapper, chunk: Address) -> MapState {
        assert!(chunk.is_aligned_to(BYTES_IN_CHUNK));
        mmapper.get_state(chunk)
    }

    #[test]
    fn ensure_mapped_1page() {
        serial_test(|| {
            let pages = 1;
            with_cleanup(
                || {
                    let mmapper = ChunkStateMmapper::new();
                    mmapper
                        .ensure_mapped(
                            FIXED_ADDRESS,
                            pages,
                            HugePageSupport::No,
                            MmapProtection::ReadWrite,
                            mmap_anno_test!(),
                        )
                        .unwrap();

                    let chunks = pages_to_chunks_up(pages);
                    for i in 0..chunks {
                        assert_eq!(
                            get_chunk_map_state(
                                &mmapper,
                                FIXED_ADDRESS + (i << LOG_BYTES_IN_CHUNK)
                            ),
                            MapState::Mapped
                        );
                    }
                },
                || {
                    OS::munmap(FIXED_ADDRESS, MAX_BYTES).unwrap();
                },
            )
        })
    }

    #[test]
    fn ensure_mapped_1chunk() {
        serial_test(|| {
            let pages = BYTES_IN_CHUNK >> LOG_BYTES_IN_PAGE as usize;
            with_cleanup(
                || {
                    let mmapper = ChunkStateMmapper::new();
                    mmapper
                        .ensure_mapped(
                            FIXED_ADDRESS,
                            pages,
                            HugePageSupport::No,
                            MmapProtection::ReadWrite,
                            mmap_anno_test!(),
                        )
                        .unwrap();

                    let chunks = pages_to_chunks_up(pages);
                    for i in 0..chunks {
                        assert_eq!(
                            get_chunk_map_state(
                                &mmapper,
                                FIXED_ADDRESS + (i << LOG_BYTES_IN_CHUNK)
                            ),
                            MapState::Mapped
                        );
                    }
                },
                || {
                    OS::munmap(FIXED_ADDRESS, MAX_BYTES).unwrap();
                },
            )
        })
    }

    #[test]
    fn ensure_mapped_more_than_1chunk() {
        serial_test(|| {
            let pages = (BYTES_IN_CHUNK + BYTES_IN_CHUNK / 2) >> LOG_BYTES_IN_PAGE as usize;
            with_cleanup(
                || {
                    let mmapper = ChunkStateMmapper::new();
                    mmapper
                        .ensure_mapped(
                            FIXED_ADDRESS,
                            pages,
                            HugePageSupport::No,
                            MmapProtection::ReadWrite,
                            mmap_anno_test!(),
                        )
                        .unwrap();

                    let chunks = pages_to_chunks_up(pages);
                    for i in 0..chunks {
                        assert_eq!(
                            get_chunk_map_state(
                                &mmapper,
                                FIXED_ADDRESS + (i << LOG_BYTES_IN_CHUNK)
                            ),
                            MapState::Mapped
                        );
                    }
                },
                || {
                    OS::munmap(FIXED_ADDRESS, MAX_BYTES).unwrap();
                },
            )
        })
    }
}