mmtk/util/heap/layout/mmapper/csm/two_level_storage.rs

use super::MapState;
use crate::util::heap::layout::mmapper::csm::{ChunkRange, MapStateStorage};
use crate::util::heap::layout::vm_layout::*;
use crate::util::rust_util::atomic_box::OnceOptionBox;
use crate::util::rust_util::rev_group::RevisitableGroupByForIterator;
use crate::util::rust_util::zeroed_alloc::new_zeroed_vec;
use crate::util::Address;
use atomic::{Atomic, Ordering};
use std::fmt;
use std::io::Result;

/// Log of the maximum number of bytes this storage can cover (a 48-bit address space).
const LOG_MAPPABLE_BYTES: usize = 48;
/// The maximum number of mappable bytes.
const MAPPABLE_BYTES: usize = 1 << LOG_MAPPABLE_BYTES;
/// The upper bound (exclusive) of mappable addresses.
const MAPPABLE_ADDRESS_LIMIT: Address = unsafe { Address::from_usize(MAPPABLE_BYTES) };

/// Log of the number of bytes per slab. The slab size is chosen so that the number of
/// slabs and the number of chunks per slab are roughly equal (a square-root split of the
/// mappable range above the chunk granularity).
const LOG_MMAP_SLAB_BYTES: usize =
    LOG_BYTES_IN_CHUNK + (LOG_MAPPABLE_BYTES - LOG_BYTES_IN_CHUNK) / 2;
/// The number of bytes per slab.
const MMAP_SLAB_BYTES: usize = 1 << LOG_MMAP_SLAB_BYTES;

/// Log of the number of chunks per slab.
const LOG_MMAP_CHUNKS_PER_SLAB: usize = LOG_MMAP_SLAB_BYTES - LOG_BYTES_IN_CHUNK;
/// The number of chunks per slab.
const MMAP_CHUNKS_PER_SLAB: usize = 1 << LOG_MMAP_CHUNKS_PER_SLAB;

/// Mask for the offset of an address within its slab.
const MMAP_SLAB_MASK: usize = (1 << LOG_MMAP_SLAB_BYTES) - 1;

/// Log of the maximum number of slabs.
const LOG_MAX_SLABS: usize = LOG_MAPPABLE_BYTES - LOG_MMAP_SLAB_BYTES;
/// The maximum number of slabs.
const MAX_SLABS: usize = 1 << LOG_MAX_SLABS;

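// Worked example (illustrative; not part of the original file). Assuming 4 MiB chunks
// (LOG_BYTES_IN_CHUNK = 22), the split above gives LOG_MMAP_SLAB_BYTES = 22 + (48 - 22) / 2
// = 35, i.e. 32 GiB slabs, so MMAP_CHUNKS_PER_SLAB = 1 << 13 = 8192 and MAX_SLABS =
// 1 << 13 = 8192: the first-level slab vector and each slab stay comparable in size.
// The identities below hold for any chunk size and are checked at compile time.
const _: () = assert!(LOG_MMAP_SLAB_BYTES == LOG_BYTES_IN_CHUNK + LOG_MMAP_CHUNKS_PER_SLAB);
const _: () = assert!(LOG_MAX_SLABS + LOG_MMAP_SLAB_BYTES == LOG_MAPPABLE_BYTES);
const _: () = assert!(
    // A square-root split: the two levels differ by at most one bit.
    LOG_MAX_SLABS == LOG_MMAP_CHUNKS_PER_SLAB || LOG_MAX_SLABS == LOG_MMAP_CHUNKS_PER_SLAB + 1
);
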
/// A slab holds the `MapState` of each chunk in a slab-sized region of the address space.
type Slab = [Atomic<MapState>; MMAP_CHUNKS_PER_SLAB];

/// A two-level implementation of `MapStateStorage`. The first level is a fixed-size vector
/// indexed by slab; each lazily allocated second-level slab holds the per-chunk `MapState`
/// entries for a slab-sized region of the address space.
pub struct TwoLevelStateStorage {
    /// Lazily initialized slabs, indexed by `slab_index`.
    slabs: Vec<OnceOptionBox<Slab>>,
}

unsafe impl Send for TwoLevelStateStorage {}
unsafe impl Sync for TwoLevelStateStorage {}

impl fmt::Debug for TwoLevelStateStorage {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "TwoLevelStateStorage({})", MAX_SLABS)
    }
}

impl MapStateStorage for TwoLevelStateStorage {
    fn log_mappable_bytes(&self) -> u8 {
        LOG_MAPPABLE_BYTES as u8
    }

    fn get_state(&self, chunk: Address) -> MapState {
        // Chunks whose slab has never been allocated are unmapped by definition.
        let Some(slab) = self.slab_table(chunk) else {
            return MapState::Unmapped;
        };
        slab[Self::in_slab_index(chunk)].load(Ordering::Relaxed)
    }

    fn bulk_set_state(&self, range: ChunkRange, state: MapState) {
        if range.is_empty() {
            return;
        }

        // Fast path: a single chunk touches exactly one slot in one slab.
        if range.is_single_chunk() {
            let addr = range.start;
            let slab = self.get_or_allocate_slab_table(addr);
            slab[Self::in_slab_index(addr)].store(state, Ordering::Relaxed);
            return;
        }

        self.foreach_slab_slice_for_write(range, |slice| {
            for slot in slice {
                slot.store(state, Ordering::Relaxed);
            }
        });
    }

    fn bulk_transition_state<F>(&self, range: ChunkRange, mut update_fn: F) -> Result<()>
    where
        F: FnMut(ChunkRange, MapState) -> Result<Option<MapState>>,
    {
        if range.is_empty() {
            return Ok(());
        }

        // Fast path: a single chunk touches exactly one slot in one slab.
        if range.is_single_chunk() {
            let addr = range.start;
            let slab = self.get_or_allocate_slab_table(addr);
            let slot = &slab[Self::in_slab_index(addr)];

            let old_state = slot.load(Ordering::Relaxed);
            if let Some(new_state) = update_fn(range, old_state)? {
                slot.store(new_state, Ordering::Relaxed);
            }

            return Ok(());
        }

        // Collect the per-slab slices covering the range, then walk their slots as one
        // contiguous sequence, grouping consecutive chunks that share the same state so
        // that `update_fn` is invoked once per group.
        let mut slices = Vec::new();

        self.foreach_slab_slice_for_write(range, |slice| {
            slices.push(slice);
        });

        let start = range.start;
        let mut start_index: usize = 0;

        for group in slices
            .iter()
            .copied()
            .flatten()
            .revisitable_group_by(|s| s.load(Ordering::Relaxed))
        {
            let state = group.key;
            let end_index = start_index + group.len;
            let group_start = start + (start_index << LOG_BYTES_IN_CHUNK);
            let group_bytes = group.len << LOG_BYTES_IN_CHUNK;
            let group_range = ChunkRange::new_aligned(group_start, group_bytes);

            if let Some(new_state) = update_fn(group_range, state)? {
                for slot in group {
                    slot.store(new_state, Ordering::Relaxed);
                }
            }

            start_index = end_index;
        }

        Ok(())
    }
}

impl TwoLevelStateStorage {
    pub fn new() -> Self {
        Self {
            slabs: new_zeroed_vec(MAX_SLABS),
        }
    }

    /// Create a slab with all chunks in the `Unmapped` state.
    fn new_slab() -> Slab {
        std::array::from_fn(|_| Atomic::new(MapState::Unmapped))
    }

    /// Get the slab covering `addr`, or `None` if it has not been allocated yet.
    fn slab_table(&self, addr: Address) -> Option<&Slab> {
        let index: usize = Self::slab_index(addr);
        let slot = self.slabs.get(index)?;
        slot.get(Ordering::Relaxed)
    }

    /// Get the slab covering `addr`, allocating and registering it if it does not exist.
    fn get_or_allocate_slab_table(&self, addr: Address) -> &Slab {
        let index: usize = Self::slab_index(addr);
        let Some(slot) = self.slabs.get(index) else {
            panic!("Cannot allocate slab for address: {addr}");
        };
        // Lazily initialize the slab on first use; `OnceOptionBox` arbitrates concurrent
        // initialization attempts.
        slot.get_or_init(Ordering::Relaxed, Ordering::Relaxed, Self::new_slab)
    }

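    // Address decomposition (illustrative note, not part of the original file): the
    // top-level index is the address divided by the slab size, and the in-slab index is
    // the remaining offset divided by the chunk size. For example, assuming 4 MiB chunks
    // and therefore 32 GiB slabs, the address 0x8_4040_0000 (= 1 slab + 257 chunks) has
    // slab_index == 1 and in_slab_index == 257.
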
    /// The index of the slab covering `addr`.
    fn slab_index(addr: Address) -> usize {
        addr >> LOG_MMAP_SLAB_BYTES
    }

    /// The index, within its slab, of the chunk containing `addr`.
    fn in_slab_index(addr: Address) -> usize {
        (addr & MMAP_SLAB_MASK) >> LOG_BYTES_IN_CHUNK
    }

    /// Call `f` on each slice of slab slots covering part of `range`, allocating slabs as
    /// needed. Each slice lies within a single slab, so a range spanning multiple slabs
    /// yields multiple slices.
    fn foreach_slab_slice_for_write<'s, F>(&'s self, range: ChunkRange, mut f: F)
    where
        F: FnMut(&'s [Atomic<MapState>]),
    {
        debug_assert!(
            range.is_within_limit(MAPPABLE_ADDRESS_LIMIT),
            "range {range} out of bound"
        );

        let limit = range.limit();
        let mut low = range.start;
        while low < limit {
            // The current slice ends at the start of the next slab or at the end of the
            // range, whichever comes first.
            let high = (low + MMAP_SLAB_BYTES)
                .align_down(MMAP_SLAB_BYTES)
                .min(limit);

            let slab = self.get_or_allocate_slab_table(low);
            let low_index = Self::in_slab_index(low);
            let high_index = Self::in_slab_index(high);
            // If `high` is the start of the next slab, its in-slab index wraps around to 0,
            // in which case the slice extends to the end of the current slab.
            let ub_index = if high_index == 0 {
                MMAP_CHUNKS_PER_SLAB
            } else {
                high_index
            };
            f(&slab[low_index..ub_index]);

            low = high;
        }
    }
}

impl Default for TwoLevelStateStorage {
    fn default() -> Self {
        Self::new()
    }
}
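
// The tests below are an illustrative usage sketch, not part of the original file. They
// rely only on items already used above (`MapState::Unmapped`, `ChunkRange::new_aligned`,
// `Address::from_usize`, address arithmetic); no other `MapState` variants or `ChunkRange`
// helpers are assumed.
#[cfg(test)]
mod usage_sketch {
    use super::*;

    /// Chunks whose slab has never been allocated read back as `Unmapped`.
    #[test]
    fn unallocated_slabs_read_as_unmapped() {
        let storage = TwoLevelStateStorage::new();
        let chunk = unsafe { Address::from_usize(42 << LOG_BYTES_IN_CHUNK) };
        assert!(matches!(storage.get_state(chunk), MapState::Unmapped));
    }

    /// `bulk_transition_state` visits consecutive same-state chunks as a single group,
    /// even when the range crosses a slab boundary.
    #[test]
    fn bulk_transition_groups_consecutive_equal_states() {
        let storage = TwoLevelStateStorage::new();
        // Two chunks at the end of the first slab plus two at the start of the second.
        let start =
            unsafe { Address::from_usize(MMAP_SLAB_BYTES - (2 << LOG_BYTES_IN_CHUNK)) };
        let range = ChunkRange::new_aligned(start, 4 << LOG_BYTES_IN_CHUNK);

        let mut calls = 0;
        let mut chunks_seen = 0;
        storage
            .bulk_transition_state(range, |group_range, state| {
                assert!(matches!(state, MapState::Unmapped));
                calls += 1;
                chunks_seen +=
                    (group_range.limit() - group_range.start) >> LOG_BYTES_IN_CHUNK;
                Ok(None) // Leave every chunk's state unchanged.
            })
            .unwrap();

        // All four chunks start out Unmapped, so they form one group despite spanning
        // two slabs.
        assert_eq!(calls, 1);
        assert_eq!(chunks_seen, 4);
    }
}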