mmtk/util/heap/layout/mmapper/csm/byte_map_storage.rs

use super::MapState;
use crate::util::heap::layout::mmapper::csm::ChunkRange;
use crate::util::heap::layout::mmapper::csm::MapStateStorage;
use crate::util::rust_util::rev_group::RevisitableGroupByForIterator;
use crate::util::Address;

use crate::util::heap::layout::vm_layout::*;
use std::fmt;
use std::sync::atomic::Ordering;
use std::sync::Mutex;

use atomic::Atomic;
use std::io::Result;

/// Base-2 logarithm of the address space size that [`ByteMapStateStorage`] is able to handle.
/// 32 bits is enough for 32-bit architectures.
/// We may increase it beyond 32 so that it is also usable on 64-bit machines for VMs with
/// limited address spaces.
const LOG_MAPPABLE_BYTES: usize = 32;

/// The number of chunks in the mappable address range.  For now, `ByteMapStateStorage` is only
/// used for a 32-bit address range.
const MMAP_NUM_CHUNKS: usize = 1 << (LOG_MAPPABLE_BYTES - LOG_BYTES_IN_CHUNK);
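
// A hedged compile-time sanity check, not part of the original file: the table below is a plain
// byte map with one entry per chunk, which assumes `MapState` is a fieldless, byte-sized enum.
// If the csm `MapState` ever grows beyond one byte, the sizing described above (and the "byte
// map" name) would need revisiting.
const _: () = assert!(std::mem::size_of::<MapState>() == 1);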

/// A [`MapStateStorage`] implementation based on a simple array.
///
/// Currently it is sized to cover a 32-bit address range.
pub struct ByteMapStateStorage {
    lock: Mutex<()>,
    mapped: [Atomic<MapState>; MMAP_NUM_CHUNKS],
}

impl fmt::Debug for ByteMapStateStorage {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "ByteMapStateStorage({})", MMAP_NUM_CHUNKS)
    }
}

impl MapStateStorage for ByteMapStateStorage {
    fn log_mappable_bytes(&self) -> u8 {
        LOG_MAPPABLE_BYTES as u8
    }

    fn get_state(&self, chunk: Address) -> MapState {
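        // One byte-sized slot per chunk.  Addresses beyond the mappable range have no slot and
        // are reported as unmapped.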
        let index = chunk >> LOG_BYTES_IN_CHUNK;
        let Some(slot) = self.mapped.get(index) else {
            return MapState::Unmapped;
        };
        slot.load(Ordering::Relaxed)
    }

    fn bulk_set_state(&self, range: ChunkRange, state: MapState) {
        if range.is_empty() {
            return;
        }

        let index_start = range.start >> LOG_BYTES_IN_CHUNK;
        let index_limit = range.limit() >> LOG_BYTES_IN_CHUNK;
        for index in index_start..index_limit {
            self.mapped[index].store(state, Ordering::Relaxed);
        }
    }

    fn bulk_transition_state<F>(&self, range: ChunkRange, mut update_fn: F) -> Result<()>
    where
        F: FnMut(ChunkRange, MapState) -> Result<Option<MapState>>,
    {
        if range.is_empty() {
            return Ok(());
        }

        if range.is_single_chunk() {
            let chunk = range.start;
            let index = chunk >> LOG_BYTES_IN_CHUNK;
            let slot: &Atomic<MapState> = &self.mapped[index];
            let state = slot.load(Ordering::Relaxed);
            if let Some(new_state) = update_fn(range, state)? {
                slot.store(new_state, Ordering::Relaxed);
            }
            return Ok(());
        }

        let index_start = range.start >> LOG_BYTES_IN_CHUNK;
        let index_limit = range.limit() >> LOG_BYTES_IN_CHUNK;

        let mut group_start = index_start;
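        // Walk maximal runs of adjacent chunks that currently share the same state, so that
        // `update_fn` is invoked once per run (with a range covering the whole run) rather than
        // once per chunk.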
        for group in self.mapped.as_slice()[index_start..index_limit]
            .iter()
            .revisitable_group_by(|s| s.load(Ordering::Relaxed))
        {
            let state = group.key;
            let group_end = group_start + group.len;
            let group_start_addr =
                unsafe { Address::from_usize(group_start << LOG_BYTES_IN_CHUNK) };
            let group_bytes = group.len << LOG_BYTES_IN_CHUNK;
            let group_range = ChunkRange::new_aligned(group_start_addr, group_bytes);
            if let Some(new_state) = update_fn(group_range, state)? {
                for index in group_start..group_end {
                    self.mapped[index].store(new_state, Ordering::Relaxed);
                }
            }
            group_start = group_end;
        }

        Ok(())
    }
}

impl ByteMapStateStorage {
    pub fn new() -> Self {
        // Because `Atomic<MapState>` does not implement `Copy`, it is a compilation error to use
        // the expression `[Atomic::new(MapState::Unmapped); MMAP_NUM_CHUNKS]`, because that would
        // involve copying.  We must define a constant for it instead.
        //
        // If we declare a const `Atomic`, Clippy will warn about const items being interior
        // mutable.  The inline const expression `const { Atomic::new(MapState::Unmapped) }` would
        // eliminate this warning, but it is only stable since Rust 1.79.
        //
        // TODO: Use the inline const expression after we bump MSRV to 1.79.
        #[allow(clippy::declare_interior_mutable_const)]
        const INITIAL_ENTRY: Atomic<MapState> = Atomic::new(MapState::Unmapped);

        ByteMapStateStorage {
            lock: Mutex::new(()),
            mapped: [INITIAL_ENTRY; MMAP_NUM_CHUNKS],
        }
    }
}

impl Default for ByteMapStateStorage {
    fn default() -> Self {
        Self::new()
    }
}
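
// The tests below are a hedged sketch, not part of the original file.  They assume that the csm
// `MapState` has a `Mapped` variant (as the enum used by the non-csm mmappers does), and that
// `ChunkRange::new_aligned(start, bytes)` takes a chunk-aligned start address and a length in
// bytes, as the call in `bulk_transition_state` above suggests.
#[cfg(test)]
mod tests {
    use super::*;

    /// A hypothetical helper for the tests: the address of the `index`-th chunk.
    fn chunk(index: usize) -> Address {
        unsafe { Address::from_usize(index << LOG_BYTES_IN_CHUNK) }
    }

    #[test]
    fn bulk_set_state_round_trip() {
        let storage = ByteMapStateStorage::new();
        // Initially every chunk is unmapped.
        assert!(matches!(storage.get_state(chunk(3)), MapState::Unmapped));

        // Mark chunks 2, 3 and 4 as mapped, then read the states back.
        let range = ChunkRange::new_aligned(chunk(2), 3 << LOG_BYTES_IN_CHUNK);
        storage.bulk_set_state(range, MapState::Mapped);
        assert!(matches!(storage.get_state(chunk(1)), MapState::Unmapped));
        assert!(matches!(storage.get_state(chunk(2)), MapState::Mapped));
        assert!(matches!(storage.get_state(chunk(4)), MapState::Mapped));
        assert!(matches!(storage.get_state(chunk(5)), MapState::Unmapped));
    }

    #[test]
    fn bulk_transition_groups_equal_states() {
        let storage = ByteMapStateStorage::new();
        // Pre-map chunks 12 and 13 so that the range 10..16 consists of three runs:
        // unmapped (10..12), mapped (12..14), unmapped (14..16).
        storage.bulk_set_state(
            ChunkRange::new_aligned(chunk(12), 2 << LOG_BYTES_IN_CHUNK),
            MapState::Mapped,
        );

        let mut groups = Vec::new();
        storage
            .bulk_transition_state(
                ChunkRange::new_aligned(chunk(10), 6 << LOG_BYTES_IN_CHUNK),
                |group_range, state| {
                    groups.push((group_range.start, matches!(state, MapState::Mapped)));
                    // Transition every group to the mapped state.
                    Ok(Some(MapState::Mapped))
                },
            )
            .unwrap();

        // `update_fn` is called once per maximal run of chunks with equal states.
        assert_eq!(groups.len(), 3);
        assert_eq!(groups[0], (chunk(10), false));
        assert_eq!(groups[1], (chunk(12), true));
        assert_eq!(groups[2], (chunk(14), false));

        // Afterwards, all six chunks are mapped.
        for i in 10..16 {
            assert!(matches!(storage.get_state(chunk(i)), MapState::Mapped));
        }
    }
}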