// mmtk/util/heap/layout/map32.rs
use super::map::CreateFreeListResult;
use super::map::VMMap;
use crate::mmtk::SFT_MAP;
use crate::util::conversions;
use crate::util::freelist::FreeList;
use crate::util::heap::layout::heap_parameters::*;
use crate::util::heap::layout::vm_layout::*;
use crate::util::heap::space_descriptor::SpaceDescriptor;
use crate::util::int_array_freelist::IntArrayFreeList;
use crate::util::rust_util::zeroed_alloc::new_zeroed_vec;
use crate::util::Address;
use std::cell::UnsafeCell;
use std::sync::{Mutex, MutexGuard};

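/// 32-bit implementation of the `VMMap` trait. It records which `SpaceDescriptor` owns
/// each chunk and manages the pool of discontiguous chunks. All mutable state lives in
/// `Map32Inner` behind an `UnsafeCell`; see `mut_self` and `mut_self_with_sync` below
/// for how mutation is performed.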
pub struct Map32 {
    sync: Mutex<()>,
    inner: UnsafeCell<Map32Inner>,
}

#[doc(hidden)]
pub struct Map32Inner {
    prev_link: Vec<i32>,
    next_link: Vec<i32>,
    region_map: IntArrayFreeList,
    global_page_map: IntArrayFreeList,
    shared_discontig_fl_count: usize,
    total_available_discontiguous_chunks: usize,
    finalized: bool,
    descriptor_map: Vec<SpaceDescriptor>,
}

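// SAFETY: `Map32` hands out `&Map32Inner` via `Deref` and mutates it through `mut_self`
// or `mut_self_with_sync`. This is assumed sound because writers either hold the `sync`
// mutex or run while no other thread accesses the map (e.g. during initialization).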
unsafe impl Send for Map32 {}
unsafe impl Sync for Map32 {}

impl Map32 {
    pub fn new() -> Self {
        let max_chunks = vm_layout().max_chunks();
        Map32 {
            inner: UnsafeCell::new(Map32Inner {
                prev_link: vec![0; max_chunks],
                next_link: vec![0; max_chunks],
                region_map: IntArrayFreeList::new(max_chunks, max_chunks as _, 1),
                global_page_map: IntArrayFreeList::new(1, 1, MAX_SPACES),
                shared_discontig_fl_count: 0,
                total_available_discontiguous_chunks: 0,
                finalized: false,
                descriptor_map: new_zeroed_vec(max_chunks),
            }),
            sync: Mutex::new(()),
        }
    }
}

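// `Deref` gives read-only access to the inner state; all writes go through the
// `mut_self*` helpers below.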
impl std::ops::Deref for Map32 {
    type Target = Map32Inner;
    fn deref(&self) -> &Self::Target {
        unsafe { &*self.inner.get() }
    }
}

impl VMMap for Map32 {
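    // Record `descriptor` as the owner of every chunk in `[start, start + extent)`.
    // This uses the unsynchronized `mut_self`; callers are expected to either hold
    // `sync` (as `allocate_contiguous_chunks` does) or run single-threaded.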
    fn insert(&self, start: Address, extent: usize, descriptor: SpaceDescriptor) {
        let self_mut: &mut Map32Inner = unsafe { self.mut_self() };
        let mut e = 0;
        while e < extent {
            let index = (start + e).chunk_index();
            assert!(
                self.descriptor_map[index].is_empty(),
                "Conflicting virtual address request"
            );
            debug!(
                "Set descriptor {:?} for Chunk {}",
                descriptor,
                conversions::chunk_index_to_address(index)
            );
            self_mut.descriptor_map[index] = descriptor;
            e += BYTES_IN_CHUNK;
        }
    }

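    // Create a page-grained free list for a discontiguous space, backed by (a child of)
    // the shared `global_page_map`.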
    fn create_freelist(&self, _start: Address) -> CreateFreeListResult {
        let free_list = Box::new(IntArrayFreeList::from_parent(
            &self.global_page_map,
            self.get_discontig_freelist_pr_ordinal() as _,
        ));
        CreateFreeListResult {
            free_list,
            space_displacement: 0,
        }
    }

    fn create_parent_freelist(
        &self,
        _start: Address,
        units: usize,
        grain: i32,
    ) -> CreateFreeListResult {
        let free_list = Box::new(IntArrayFreeList::new(units, grain, 1));
        CreateFreeListResult {
            free_list,
            space_displacement: 0,
        }
    }

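    // Allocate `chunks` contiguous chunks from the region free list and link the new
    // region onto the head of the caller's chunk list (`head` is the previous head, or
    // zero if the list is empty). Returns zero if the request cannot be satisfied.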
    unsafe fn allocate_contiguous_chunks(
        &self,
        descriptor: SpaceDescriptor,
        chunks: usize,
        head: Address,
        _maybe_freelist: Option<&mut dyn FreeList>,
    ) -> Address {
        let (_sync, self_mut) = self.mut_self_with_sync();
        let chunk = self_mut.region_map.alloc(chunks as _);
        debug_assert!(chunk != 0);
        if chunk == -1 {
            return Address::zero();
        }
        self_mut.total_available_discontiguous_chunks -= chunks;
        let rtn = conversions::chunk_index_to_address(chunk as _);
        self.insert(rtn, chunks << LOG_BYTES_IN_CHUNK, descriptor);
        if head.is_zero() {
            debug_assert!(self.next_link[chunk as usize] == 0);
        } else {
            self_mut.next_link[chunk as usize] = head.chunk_index() as _;
            self_mut.prev_link[head.chunk_index()] = chunk;
        }
        debug_assert!(self.prev_link[chunk as usize] == 0);
        rtn
    }

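    // Return the start address of the region linked after the region at `start`, or
    // zero if there is none.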
    fn get_next_contiguous_region(&self, start: Address) -> Address {
        debug_assert!(start == conversions::chunk_align_down(start));
        let chunk = start.chunk_index();
        if chunk == 0 || self.next_link[chunk] == 0 {
            unsafe { Address::zero() }
        } else {
            let a = self.next_link[chunk];
            conversions::chunk_index_to_address(a as _)
        }
    }

    fn get_contiguous_region_chunks(&self, start: Address) -> usize {
        debug_assert!(start == conversions::chunk_align_down(start));
        let chunk = start.chunk_index();
        self.region_map.size(chunk as i32) as _
    }

    fn get_contiguous_region_size(&self, start: Address) -> usize {
        self.get_contiguous_region_chunks(start) << LOG_BYTES_IN_CHUNK
    }

    fn get_available_discontiguous_chunks(&self) -> usize {
        self.total_available_discontiguous_chunks
    }

    fn get_chunk_consumer_count(&self) -> usize {
        self.shared_discontig_fl_count
    }

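    // Free every region reachable from `any_chunk` by walking the chunk list in both
    // directions, then free the region containing `any_chunk` itself.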
    #[allow(clippy::while_immutable_condition)]
    fn free_all_chunks(&self, any_chunk: Address) {
        debug!("free_all_chunks: {}", any_chunk);
        let (_sync, self_mut) = self.mut_self_with_sync();
        debug_assert!(any_chunk == conversions::chunk_align_down(any_chunk));
        if !any_chunk.is_zero() {
            let chunk = any_chunk.chunk_index();
            while self_mut.next_link[chunk] != 0 {
                let x = self_mut.next_link[chunk];
                self.free_contiguous_chunks_no_lock(x);
            }
            while self_mut.prev_link[chunk] != 0 {
                let x = self_mut.prev_link[chunk];
                self.free_contiguous_chunks_no_lock(x);
            }
            self.free_contiguous_chunks_no_lock(chunk as _);
        }
    }

    unsafe fn free_contiguous_chunks(&self, start: Address) -> usize {
        debug!("free_contiguous_chunks: {}", start);
        let (_sync, _) = self.mut_self_with_sync();
        debug_assert!(start == conversions::chunk_align_down(start));
        let chunk = start.chunk_index();
        self.free_contiguous_chunks_no_lock(chunk as _)
    }

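    // Finalize the discontiguous part of the address space map, covering chunks in
    // `[from, to]`: size the global page map, block out the chunks below `from` and
    // above `to`, then return the usable chunks to the region free list and populate
    // the page map, one chunk at a time.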
    fn finalize_static_space_map(
        &self,
        from: Address,
        to: Address,
        on_discontig_start_determined: &mut dyn FnMut(Address),
    ) {
        let self_mut: &mut Map32Inner = unsafe { self.mut_self() };
        let start_address = from;
        let first_chunk = start_address.chunk_index();
        let last_chunk = to.chunk_index();
        let unavail_start_chunk = last_chunk + 1;
        let trailing_chunks = vm_layout().max_chunks() - unavail_start_chunk;
        let pages = (1 + last_chunk - first_chunk) * PAGES_IN_CHUNK;
        self_mut.global_page_map.resize_freelist(pages, pages as _);

        on_discontig_start_determined(start_address);

        // Set up the region map: block out everything below the discontiguous start,
        // tentatively claim each usable chunk, then block out the trailing chunks.
        self_mut.region_map.alloc(first_chunk as _);
        for _ in first_chunk..=last_chunk {
            self_mut.region_map.alloc(1);
        }
        let alloced_chunk = self_mut.region_map.alloc(trailing_chunks as _);
        debug_assert!(
            alloced_chunk == unavail_start_chunk as i32,
            "{} != {}",
            alloced_chunk,
            unavail_start_chunk
        );
        // Return the usable chunks to the free list and populate the global page map,
        // keeping page runs from coalescing across chunk boundaries.
        let mut first_page = 0;
        for chunk_index in first_chunk..=last_chunk {
            self_mut.total_available_discontiguous_chunks += 1;
            self_mut.region_map.free(chunk_index as _, false);
            self_mut.global_page_map.set_uncoalescable(first_page);
            let alloced_pages = self_mut.global_page_map.alloc(PAGES_IN_CHUNK as _);
            debug_assert!(alloced_pages == first_page);
            first_page += PAGES_IN_CHUNK as i32;
        }
        self_mut.finalized = true;
    }

    fn is_finalized(&self) -> bool {
        self.finalized
    }

    fn get_descriptor_for_address(&self, address: Address) -> SpaceDescriptor {
        let index = address.chunk_index();
        self.descriptor_map
            .get(index)
            .copied()
            .unwrap_or(SpaceDescriptor::UNINITIALIZED)
    }
}

impl Map32 {
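    /// Get a mutable reference to the inner state without taking `sync`.
    ///
    /// # Safety
    ///
    /// Assumed sound only when mutation is serialized by `sync` (as in
    /// `mut_self_with_sync`) or when no other thread is accessing the map,
    /// e.g. during initialization.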
    #[allow(clippy::mut_from_ref)]
    unsafe fn mut_self(&self) -> &mut Map32Inner {
        &mut *self.inner.get()
    }

    #[allow(clippy::mut_from_ref)]
    fn mut_self_with_sync(&self) -> (MutexGuard<'_, ()>, &mut Map32Inner) {
        let guard = self.sync.lock().unwrap();
        (guard, unsafe { self.mut_self() })
    }

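    // Free the contiguous region starting at `chunk`, unlink it from the chunk list,
    // clear its descriptors and SFT entries, and return the number of chunks freed.
    // Callers are expected to hold `sync` (hence "no_lock").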
    fn free_contiguous_chunks_no_lock(&self, chunk: i32) -> usize {
        unsafe {
            let chunks = self.mut_self().region_map.free(chunk, false);
            self.mut_self().total_available_discontiguous_chunks += chunks as usize;
            let next = self.next_link[chunk as usize];
            let prev = self.prev_link[chunk as usize];
            if next != 0 {
                self.mut_self().prev_link[next as usize] = prev
            };
            if prev != 0 {
                self.mut_self().next_link[prev as usize] = next
            };
            self.mut_self().prev_link[chunk as usize] = 0;
            self.mut_self().next_link[chunk as usize] = 0;
            for offset in 0..chunks {
                let index = (chunk + offset) as usize;
                let chunk_start = conversions::chunk_index_to_address(index);
                debug!("Clear descriptor for Chunk {}", chunk_start);
                self.mut_self().descriptor_map[index] = SpaceDescriptor::UNINITIALIZED;
                SFT_MAP.clear(chunk_start);
            }
            chunks as _
        }
    }

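    // Hand out the next ordinal for a shared discontiguous free list (1-based), and
    // bump the consumer count reported by `get_chunk_consumer_count`.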
    fn get_discontig_freelist_pr_ordinal(&self) -> usize {
        let self_mut: &mut Map32Inner = unsafe { self.mut_self() };
        self_mut.shared_discontig_fl_count += 1;
        self.shared_discontig_fl_count
    }
}

impl Default for Map32 {
    fn default() -> Self {
        Self::new()
    }
}