mmtk/util/heap/layout/mmapper/csm/mod.rs

use crate::util::constants::LOG_BYTES_IN_PAGE;
use crate::util::conversions::raw_is_aligned;
use crate::util::heap::layout::vm_layout::*;
use crate::util::heap::layout::Mmapper;
use crate::util::memory::*;
use crate::util::Address;
use bytemuck::NoUninit;
use std::{io::Result, sync::Mutex};

mod byte_map_storage;
#[cfg(target_pointer_width = "64")]
mod two_level_storage;

#[cfg(target_pointer_width = "32")]
type ChosenMapStateStorage = byte_map_storage::ByteMapStateStorage;
#[cfg(target_pointer_width = "64")]
type ChosenMapStateStorage = two_level_storage::TwoLevelStateStorage;

/// A range of whole chunks.  Always aligned.
///
/// This type is used internally by the chunk state mmapper and its storage backends.
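///
/// For example (a minimal sketch; the address value is made up), `new_unaligned` rounds an
/// arbitrary span outwards so that the resulting range covers whole chunks:
///
/// ```ignore
/// let addr = unsafe { Address::from_usize(0x1000_0008) };
/// let range = ChunkRange::new_unaligned(addr, 4096);
/// // The range is rounded out to the enclosing chunk boundaries.
/// assert!(range.start.is_aligned_to(BYTES_IN_CHUNK));
/// assert!(raw_is_aligned(range.bytes, BYTES_IN_CHUNK));
/// ```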
#[derive(Clone, Copy)]
struct ChunkRange {
    start: Address,
    bytes: usize,
}

impl ChunkRange {
    fn new_aligned(start: Address, bytes: usize) -> Self {
        debug_assert!(
            start.is_aligned_to(BYTES_IN_CHUNK),
            "start {start} is not chunk-aligned"
        );
        debug_assert!(
            raw_is_aligned(bytes, BYTES_IN_CHUNK),
            "bytes 0x{bytes:x} is not a multiple of the chunk size"
        );
        Self { start, bytes }
    }

    fn new_unaligned(start: Address, bytes: usize) -> Self {
        let start_aligned = start.align_down(BYTES_IN_CHUNK);
        let end_aligned = (start + bytes).align_up(BYTES_IN_CHUNK);
        Self::new_aligned(start_aligned, end_aligned - start_aligned)
    }

    fn limit(&self) -> Address {
        self.start + self.bytes
    }

    fn is_within_limit(&self, limit: Address) -> bool {
        self.limit() <= limit
    }

    fn is_empty(&self) -> bool {
        self.bytes == 0
    }

    fn is_single_chunk(&self) -> bool {
        self.bytes == BYTES_IN_CHUNK
    }
}

impl std::fmt::Display for ChunkRange {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}-{}", self.start, self.limit())
    }
}

/// The back-end storage of [`ChunkStateMmapper`].  It is responsible for holding the state of each
/// chunk (eagerly or lazily) and transitioning the states in bulk.
trait MapStateStorage {
    /// The base-2 logarithm of the size, in bytes, of the address space this `MapStateStorage` can
    /// handle.
    fn log_mappable_bytes(&self) -> u8;

    /// Return the state of the given `chunk` (must be chunk-aligned).
    ///
    /// Note that all chunks are logically `MapState::Unmapped` before any state is stored,
    /// including chunks outside the mappable address range.
    fn get_state(&self, chunk: Address) -> MapState;

    /// Set the state of all chunks within `range` to `state`.
    fn bulk_set_state(&self, range: ChunkRange, state: MapState);

    /// Visit the chunk states within `range` and allow the `update_fn` callback to inspect and
    /// change the states.
    ///
    /// It visits chunks from low to high addresses, and calls `update_fn(group_range, group_state)`
    /// for each maximal contiguous chunk range `group_range` whose chunks all share the same state
    /// `group_state`.  `update_fn` can take actions accordingly and return one of three values:
    /// -   `Err(err)`: Stop visiting and return `Err(err)` from `bulk_transition_state`
    ///     immediately.
    /// -   `Ok(None)`: Continue visiting the next chunk range without changing chunk states.
    /// -   `Ok(Some(new_state))`: Set the state of all chunks within `group_range` to `new_state`.
    ///
    /// Return `Ok(())` if it finishes visiting all chunks normally.
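    ///
    /// A minimal sketch of the callback contract; `storage` and `range` are hypothetical values
    /// supplied by the caller, and a real caller would also perform the corresponding mmap calls:
    ///
    /// ```ignore
    /// storage.bulk_transition_state(range, |group_range, group_state| {
    ///     match group_state {
    ///         // Pretend this group has just been mapped: record the new state.
    ///         MapState::Unmapped => Ok(Some(MapState::Mapped)),
    ///         // Nothing to do: keep the current state and continue with the next group.
    ///         MapState::Quarantined | MapState::Mapped => Ok(None),
    ///     }
    /// })?;
    /// ```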
    fn bulk_transition_state<F>(&self, range: ChunkRange, update_fn: F) -> Result<()>
    where
        F: FnMut(ChunkRange, MapState) -> Result<Option<MapState>>;
}

/// A [`Mmapper`] implementation based on a logical array of chunk states.
///
/// The [`ChunkStateMmapper::storage`] field holds the state of each chunk, while the
/// [`ChunkStateMmapper`] itself makes the system calls that manage the memory mapping.
///
/// As the name suggests, this implementation of [`Mmapper`] operates at the granularity of chunks.
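///
/// A rough usage sketch through the [`Mmapper`] trait; the `start`, `pages`, `strategy` and
/// `anno` values are placeholders supplied by the caller:
///
/// ```ignore
/// let mmapper = ChunkStateMmapper::new();
/// // Reserve the address range first (mapped with `PROT_NONE` via `mmap_noreserve`) ...
/// mmapper.quarantine_address_range(start, pages, strategy, anno)?;
/// // ... then map it for actual use and query the resulting state.
/// mmapper.ensure_mapped(start, pages, strategy, anno)?;
/// assert!(mmapper.is_mapped_address(start));
/// ```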
pub struct ChunkStateMmapper {
    /// Lock for transitioning map states.
    transition_lock: Mutex<()>,
    /// This holds the [`MapState`] for each chunk.
    storage: ChosenMapStateStorage,
}

impl ChunkStateMmapper {
    pub fn new() -> Self {
        Self {
            transition_lock: Default::default(),
            storage: ChosenMapStateStorage::new(),
        }
    }

    #[cfg(test)]
    fn get_state(&self, chunk: Address) -> MapState {
        self.storage.get_state(chunk)
    }
}

impl Mmapper for ChunkStateMmapper {
    fn log_granularity(&self) -> u8 {
        LOG_BYTES_IN_CHUNK as u8
    }

    fn log_mappable_bytes(&self) -> u8 {
        self.storage.log_mappable_bytes()
    }

    fn mark_as_mapped(&self, start: Address, bytes: usize) {
        let _guard = self.transition_lock.lock().unwrap();

        let range = ChunkRange::new_unaligned(start, bytes);
        self.storage.bulk_set_state(range, MapState::Mapped);
    }

    fn quarantine_address_range(
        &self,
        start: Address,
        pages: usize,
        strategy: MmapStrategy,
        anno: &MmapAnnotation,
    ) -> Result<()> {
        let _guard = self.transition_lock.lock().unwrap();

        let bytes = pages << LOG_BYTES_IN_PAGE;
        let range = ChunkRange::new_unaligned(start, bytes);

        self.storage
            .bulk_transition_state(range, |group_range, state| {
                let group_start: Address = group_range.start;
                let group_bytes = group_range.bytes;

                match state {
                    MapState::Unmapped => {
                        trace!("Trying to quarantine {group_range}");
                        mmap_noreserve(group_start, group_bytes, strategy, anno)?;
                        Ok(Some(MapState::Quarantined))
                    }
                    MapState::Quarantined => {
                        trace!("Already quarantined {group_range}");
                        Ok(None)
                    }
                    MapState::Mapped => {
                        trace!("Already mapped {group_range}");
                        Ok(None)
                    }
                }
            })
    }

    fn ensure_mapped(
        &self,
        start: Address,
        pages: usize,
        strategy: MmapStrategy,
        anno: &MmapAnnotation,
    ) -> Result<()> {
        let _guard = self.transition_lock.lock().unwrap();

        let bytes = pages << LOG_BYTES_IN_PAGE;
        let range = ChunkRange::new_unaligned(start, bytes);

        self.storage
            .bulk_transition_state(range, |group_range, state| {
                let group_start: Address = group_range.start;
                let group_bytes = group_range.bytes;

                match state {
                    MapState::Unmapped => {
                        // Not yet reserved: map fresh zeroed memory without replacing any
                        // existing mapping.
                        dzmmap_noreplace(group_start, group_bytes, strategy, anno)?;
                        Ok(Some(MapState::Mapped))
                    }
                    MapState::Quarantined => {
                        // Already reserved by `quarantine_address_range`, so it is intentional
                        // to map over our own quarantined (PROT_NONE) mapping.
                        unsafe { dzmmap(group_start, group_bytes, strategy, anno) }?;
                        Ok(Some(MapState::Mapped))
                    }
                    MapState::Mapped => Ok(None),
                }
            })
    }

    fn is_mapped_address(&self, addr: Address) -> bool {
        self.storage.get_state(addr) == MapState::Mapped
    }
}

/// The mmap state of a chunk.
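///
/// A note on transitions, as implemented in this file: `Unmapped` chunks may become `Quarantined`
/// (via [`Mmapper::quarantine_address_range`]) or `Mapped` (via [`Mmapper::ensure_mapped`] or
/// [`Mmapper::mark_as_mapped`]), and `Quarantined` chunks may become `Mapped`.  Nothing here
/// transitions a chunk back to `Unmapped`.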
#[repr(u8)]
#[derive(Copy, Clone, PartialEq, Eq, Debug, NoUninit)]
enum MapState {
    /// The chunk is unmapped and not managed by MMTk.
    Unmapped,
    /// The chunk is reserved for future use.  MMTk has reserved the address range with
    /// `mmap_noreserve` and `PROT_NONE`, but has not used it yet.
    Quarantined,
    /// The chunk is mapped by MMTk and is in use.
    Mapped,
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::mmap_anno_test;
    use crate::util::constants::LOG_BYTES_IN_PAGE;
    use crate::util::memory;
    use crate::util::test_util::CHUNK_STATE_MMAPPER_TEST_REGION;
    use crate::util::test_util::{serial_test, with_cleanup};
    use crate::util::{conversions, Address};

    const FIXED_ADDRESS: Address = CHUNK_STATE_MMAPPER_TEST_REGION.start;
    const MAX_BYTES: usize = CHUNK_STATE_MMAPPER_TEST_REGION.size;

    /// Convert a page count into the number of chunks needed to cover those pages.
    fn pages_to_chunks_up(pages: usize) -> usize {
        conversions::raw_align_up(pages << LOG_BYTES_IN_PAGE, BYTES_IN_CHUNK) / BYTES_IN_CHUNK
    }

    fn get_chunk_map_state(mmapper: &ChunkStateMmapper, chunk: Address) -> MapState {
        assert!(chunk.is_aligned_to(BYTES_IN_CHUNK));
        mmapper.get_state(chunk)
    }

    #[test]
    fn ensure_mapped_1page() {
        serial_test(|| {
            let pages = 1;
            with_cleanup(
                || {
                    let mmapper = ChunkStateMmapper::new();
                    mmapper
                        .ensure_mapped(FIXED_ADDRESS, pages, MmapStrategy::TEST, mmap_anno_test!())
                        .unwrap();

                    let chunks = pages_to_chunks_up(pages);
                    for i in 0..chunks {
                        assert_eq!(
                            get_chunk_map_state(
                                &mmapper,
                                FIXED_ADDRESS + (i << LOG_BYTES_IN_CHUNK)
                            ),
                            MapState::Mapped
                        );
                    }
                },
                || {
                    memory::munmap(FIXED_ADDRESS, MAX_BYTES).unwrap();
                },
            )
        })
    }

    #[test]
    fn ensure_mapped_1chunk() {
        serial_test(|| {
            let pages = BYTES_IN_CHUNK >> LOG_BYTES_IN_PAGE as usize;
            with_cleanup(
                || {
                    let mmapper = ChunkStateMmapper::new();
                    mmapper
                        .ensure_mapped(FIXED_ADDRESS, pages, MmapStrategy::TEST, mmap_anno_test!())
                        .unwrap();

                    let chunks = pages_to_chunks_up(pages);
                    for i in 0..chunks {
                        assert_eq!(
                            get_chunk_map_state(
                                &mmapper,
                                FIXED_ADDRESS + (i << LOG_BYTES_IN_CHUNK)
                            ),
                            MapState::Mapped
                        );
                    }
                },
                || {
                    memory::munmap(FIXED_ADDRESS, MAX_BYTES).unwrap();
                },
            )
        })
    }

    #[test]
    fn ensure_mapped_more_than_1chunk() {
        serial_test(|| {
            let pages = (BYTES_IN_CHUNK + BYTES_IN_CHUNK / 2) >> LOG_BYTES_IN_PAGE as usize;
            with_cleanup(
                || {
                    let mmapper = ChunkStateMmapper::new();
                    mmapper
                        .ensure_mapped(FIXED_ADDRESS, pages, MmapStrategy::TEST, mmap_anno_test!())
                        .unwrap();

                    let chunks = pages_to_chunks_up(pages);
                    for i in 0..chunks {
                        assert_eq!(
                            get_chunk_map_state(
                                &mmapper,
                                FIXED_ADDRESS + (i << LOG_BYTES_IN_CHUNK)
                            ),
                            MapState::Mapped
                        );
                    }
                },
                || {
                    memory::munmap(FIXED_ADDRESS, MAX_BYTES).unwrap();
                },
            )
        })
    }
}