use std::sync::{Mutex, MutexGuard};

use super::layout::vm_layout::PAGES_IN_CHUNK;
use super::layout::VMMap;
use super::pageresource::{PRAllocFail, PRAllocResult};
use super::PageResource;
use crate::mmtk::MMAPPER;
use crate::util::address::Address;
use crate::util::alloc::embedded_meta_data::*;
use crate::util::conversions;
use crate::util::freelist;
use crate::util::freelist::FreeList;
use crate::util::heap::layout::vm_layout::*;
use crate::util::heap::layout::CreateFreeListResult;
use crate::util::heap::pageresource::CommonPageResource;
use crate::util::heap::space_descriptor::SpaceDescriptor;
use crate::util::memory;
use crate::util::opaque_pointer::*;
use crate::util::raw_memory_freelist::RawMemoryFreeList;
use crate::vm::*;
use std::marker::PhantomData;

const UNINITIALIZED_WATER_MARK: i32 = -1;

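/// A page resource backed by a free list of pages. It can serve either a contiguous space or a
/// discontiguous space that grows and shrinks in units of chunks.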
pub struct FreeListPageResource<VM: VMBinding> {
    common: CommonPageResource,
    sync: Mutex<FreeListPageResourceSync>,
    _p: PhantomData<VM>,
    /// Protect memory when pages are released, and unprotect it again when they are re-allocated.
    pub(crate) protect_memory_on_release: Option<memory::MmapProtection>,
}

unsafe impl<VM: VMBinding> Send for FreeListPageResource<VM> {}
unsafe impl<VM: VMBinding> Sync for FreeListPageResource<VM> {}

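/// The mutable state of a `FreeListPageResource`, protected by the `Mutex` in its `sync` field.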
struct FreeListPageResourceSync {
    pub(crate) free_list: Box<dyn FreeList>,
    pages_currently_on_freelist: usize,
    start: Address,
    highwater_mark: i32,
}

impl<VM: VMBinding> PageResource<VM> for FreeListPageResource<VM> {
    fn common(&self) -> &CommonPageResource {
        &self.common
    }
    fn common_mut(&mut self) -> &mut CommonPageResource {
        &mut self.common
    }
    fn update_discontiguous_start(&mut self, start: Address) {
        // Only a discontiguous space takes its start address from the VMMap.
        if !self.common.contiguous {
            let sync = self.sync.get_mut().unwrap();
            sync.start = start.align_up(BYTES_IN_REGION);
        }
    }

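    /// Estimate the physical pages still available to this page resource: the pages currently on
    /// the free list, plus (for a discontiguous space) the pages in chunks it could still take
    /// from the global chunk pool, or (for a growable contiguous space on 64 bits) the pages in
    /// the space that are not yet reserved.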
    fn get_available_physical_pages(&self) -> usize {
        let mut rtn = {
            let sync = self.sync.lock().unwrap();
            sync.pages_currently_on_freelist
        };

        if !self.common.contiguous {
            let chunks: usize = self
                .common
                .vm_map
                .get_available_discontiguous_chunks()
                .saturating_sub(self.common.vm_map.get_chunk_consumer_count());
            rtn += chunks * PAGES_IN_CHUNK;
        } else if self.common.growable && cfg!(target_pointer_width = "64") {
            rtn = vm_layout().pages_in_space64() - self.reserved_pages();
        }

        rtn
    }

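    /// Allocate `required_pages` pages from the free list, growing the space by whole chunks if
    /// the free list cannot satisfy the request and the space is growable.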
    fn alloc_pages(
        &self,
        space_descriptor: SpaceDescriptor,
        reserved_pages: usize,
        required_pages: usize,
        tls: VMThread,
    ) -> Result<PRAllocResult, PRAllocFail> {
        let mut sync = self.sync.lock().unwrap();
        let mut new_chunk = false;
        let mut page_offset = sync.free_list.alloc(required_pages as _);
        if page_offset == freelist::FAILURE && self.common.growable {
            page_offset = unsafe {
                self.allocate_contiguous_chunks(space_descriptor, required_pages, &mut sync)
            };
            new_chunk = true;
        }

        if page_offset == freelist::FAILURE {
            return Result::Err(PRAllocFail);
        } else {
            sync.pages_currently_on_freelist -= required_pages;
            if page_offset > sync.highwater_mark {
                if sync.highwater_mark == UNINITIALIZED_WATER_MARK
                    || (page_offset ^ sync.highwater_mark) > PAGES_IN_REGION as i32
                {
                    new_chunk = true;
                }
                sync.highwater_mark = page_offset;
            }
        }

        let rtn = sync.start + conversions::pages_to_bytes(page_offset as _);
        self.commit_pages(reserved_pages, required_pages, tls);
        if self.protect_memory_on_release.is_some() {
            if !new_chunk {
                // Another thread may have just grown the space by a chunk that the mmapper has
                // not mapped yet. Wait until the address is mapped before unprotecting it.
                while !new_chunk && !MMAPPER.is_mapped_address(rtn) {}
                self.munprotect(rtn, sync.free_list.size(page_offset as _) as _)
            } else if !self.common.contiguous && new_chunk {
                // A new discontiguous chunk is only unprotected if it is already mapped,
                // i.e. it was previously released and protected by us.
                if MMAPPER.is_mapped_address(rtn) {
                    self.munprotect(rtn, sync.free_list.size(page_offset as _) as _)
                }
            }
        };
        Result::Ok(PRAllocResult {
            start: rtn,
            pages: required_pages,
            new_chunk,
        })
    }
}

impl<VM: VMBinding> FreeListPageResource<VM> {
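    /// Create a page resource for a contiguous space, managing `bytes` bytes starting at `start`.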
    pub fn new_contiguous(start: Address, bytes: usize, vm_map: &'static dyn VMMap) -> Self {
        let pages = conversions::bytes_to_pages_up(bytes);
        let CreateFreeListResult {
            free_list,
            space_displacement,
        } = vm_map.create_parent_freelist(start, pages, PAGES_IN_REGION as _);

        // A `RawMemoryFreeList` occupies `space_displacement` bytes at the start of the space,
        // so the usable part of the space starts after it.
        let actual_start = start + space_displacement;
        debug!(
            " in new_contiguous: space_displacement = {:?}, actual_start = {}",
            space_displacement, actual_start
        );

        let growable = cfg!(target_pointer_width = "64");
        FreeListPageResource {
            common: CommonPageResource::new(true, growable, vm_map),
            sync: Mutex::new(FreeListPageResourceSync {
                free_list,
                // A growable space starts with no pages on the free list; a fixed-size space
                // starts with all of them.
                pages_currently_on_freelist: if growable { 0 } else { pages },
                start: actual_start,
                highwater_mark: UNINITIALIZED_WATER_MARK,
            }),
            _p: PhantomData,
            protect_memory_on_release: None,
        }
    }

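    /// Create a page resource for a discontiguous space. The resource starts with no pages and
    /// grows on demand by acquiring chunks from the global chunk pool.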
    pub fn new_discontiguous(vm_map: &'static dyn VMMap) -> Self {
        let start = vm_layout().available_start();

        let CreateFreeListResult {
            free_list,
            space_displacement,
        } = vm_map.create_freelist(start);

        debug_assert!(
            free_list.downcast_ref::<RawMemoryFreeList>().is_none(),
            "We can't allocate RawMemoryFreeList for discontiguous spaces."
        );

        // Since no `RawMemoryFreeList` is placed at the start of the space, there is no
        // displacement to skip over.
        debug_assert_eq!(space_displacement, 0);
        debug!("new_discontiguous. start: {start}");

        FreeListPageResource {
            common: CommonPageResource::new(false, true, vm_map),
            sync: Mutex::new(FreeListPageResourceSync {
                free_list,
                pages_currently_on_freelist: 0,
                start,
                highwater_mark: UNINITIALIZED_WATER_MARK,
            }),
            _p: PhantomData,
            protect_memory_on_release: None,
        }
    }

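    /// Protect the memory occupied by the given pages. Only used when
    /// `protect_memory_on_release` is enabled.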
    fn mprotect(&self, start: Address, pages: usize) {
        assert!(self.protect_memory_on_release.is_some());
        if let Err(e) = memory::mprotect(start, conversions::pages_to_bytes(pages)) {
            panic!(
                "Failed at protecting memory (starting at {}): {:?}",
                start, e
            );
        }
    }

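    /// Restore access to the given pages, using the protection level stored in
    /// `protect_memory_on_release`. Only used when that option is enabled.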
    fn munprotect(&self, start: Address, pages: usize) {
        assert!(self.protect_memory_on_release.is_some());
        if let Err(e) = memory::munprotect(
            start,
            conversions::pages_to_bytes(pages),
            self.protect_memory_on_release.unwrap(),
        ) {
            panic!(
                "Failed at unprotecting memory (starting at {}): {:?}",
                start, e
            );
        }
    }

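    /// Allocate a single chunk for this (growable) page resource without committing its pages
    /// to the page accounting.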
    pub(crate) fn allocate_one_chunk_no_commit(
        &self,
        space_descriptor: SpaceDescriptor,
    ) -> Result<PRAllocResult, PRAllocFail> {
        assert!(self.common.growable);
        let mut sync = self.sync.lock().unwrap();
        let page_offset =
            unsafe { self.allocate_contiguous_chunks(space_descriptor, PAGES_IN_CHUNK, &mut sync) };

        if page_offset == freelist::FAILURE {
            return Result::Err(PRAllocFail);
        } else {
            sync.pages_currently_on_freelist -= PAGES_IN_CHUNK;
            if page_offset > sync.highwater_mark {
                sync.highwater_mark = page_offset;
            }
        }

        let rtn = sync.start + conversions::pages_to_bytes(page_offset as _);
        Result::Ok(PRAllocResult {
            start: rtn,
            pages: PAGES_IN_CHUNK,
            new_chunk: true,
        })
    }

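    /// Grow the space by enough chunks to satisfy a request for `pages` pages, put the new
    /// chunks on the free list, and retry the allocation. Returns the allocated page offset,
    /// or `freelist::FAILURE` if the space could not be grown.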
    unsafe fn allocate_contiguous_chunks(
        &self,
        space_descriptor: SpaceDescriptor,
        pages: usize,
        sync: &mut MutexGuard<FreeListPageResourceSync>,
    ) -> i32 {
        let mut rtn = freelist::FAILURE;
        let required_chunks = crate::policy::space::required_chunks(pages);
        let region = self.common.grow_discontiguous_space(
            space_descriptor,
            required_chunks,
            Some(sync.free_list.as_mut()),
        );

        if !region.is_zero() {
            let region_start = conversions::bytes_to_pages_up(region - sync.start);
            let region_end = region_start + (required_chunks * PAGES_IN_CHUNK) - 1;
            sync.free_list.set_uncoalescable(region_start as _);
            sync.free_list.set_uncoalescable(region_end as i32 + 1);
            for p in (region_start..region_end).step_by(PAGES_IN_CHUNK) {
                if p != region_start {
                    sync.free_list.clear_uncoalescable(p as _);
                }
                // Add this chunk to our free list.
                let liberated = sync.free_list.free(p as _, true);
                debug_assert!(liberated as usize == PAGES_IN_CHUNK + (p - region_start));
                sync.pages_currently_on_freelist += PAGES_IN_CHUNK;
            }
            // Retry the request that triggered this growth.
            rtn = sync.free_list.alloc(pages as _);
        }

        rtn
    }

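    /// Return the contiguous region of chunks starting at `chunk` to the global chunk pool,
    /// after removing all of its pages from our free list.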
    unsafe fn free_contiguous_chunk(&self, chunk: Address, sync: &mut FreeListPageResourceSync) {
        let num_chunks = self.vm_map().get_contiguous_region_chunks(chunk);
        // Nail down all pages associated with the chunk, so it is no longer on our free list.
        let mut chunk_start = conversions::bytes_to_pages_up(chunk - sync.start);
        let chunk_end = chunk_start + (num_chunks * PAGES_IN_CHUNK);
        while chunk_start < chunk_end {
            sync.free_list.set_uncoalescable(chunk_start as _);
            let tmp = sync
                .free_list
                .alloc_from_unit(PAGES_IN_CHUNK as _, chunk_start as _)
                as usize;
            debug_assert!(tmp == chunk_start);
            chunk_start += PAGES_IN_CHUNK;
            sync.pages_currently_on_freelist -= PAGES_IN_CHUNK;
        }
        // Now return the address space associated with the chunk for global reuse.
        self.common.release_discontiguous_chunks(chunk);
    }

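    /// Release a block of pages previously returned by `alloc_pages`, identified by its start
    /// address `first`, back to the free list.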
    pub fn release_pages(&self, first: Address) {
        debug_assert!(conversions::is_page_aligned(first));
        let mut sync = self.sync.lock().unwrap();
        let page_offset = conversions::bytes_to_pages_up(first - sync.start);
        let pages = sync.free_list.size(page_offset as _);
        debug_assert!(pages as usize <= self.common.accounting.get_committed_pages());

        if self.protect_memory_on_release.is_some() {
            self.mprotect(first, pages as _);
        }

        self.common.accounting.release(pages as _);
        let freed = sync.free_list.free(page_offset as _, true);
        sync.pages_currently_on_freelist += pages as usize;
        if !self.common.contiguous {
            // Only discontiguous spaces manage whole chunks; see if any can be returned.
            self.release_free_chunks(first, freed as _, &mut sync);
        }
    }

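    /// After pages are freed in a discontiguous space, check whether the chunks containing them
    /// are now completely free, and if so return them to the global chunk pool.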
    fn release_free_chunks(
        &self,
        freed_page: Address,
        pages_freed: usize,
        sync: &mut FreeListPageResourceSync,
    ) {
        let page_offset = conversions::bytes_to_pages_up(freed_page - sync.start);

        // A necessary (but not sufficient) condition for whole chunks to be free.
        if pages_freed % PAGES_IN_CHUNK == 0 {
            // Grow a region of chunks, starting with the chunk containing the freed page.
            let mut region_start = page_offset & !(PAGES_IN_CHUNK - 1);
            let mut next_region_start = region_start + PAGES_IN_CHUNK;
            // Try to extend the region in both directions (end-point pages are marked as
            // non-coalescing).
            while sync.free_list.is_coalescable(region_start as _) {
                region_start -= PAGES_IN_CHUNK;
            }
            while next_region_start < freelist::MAX_UNITS as usize
                && sync.free_list.is_coalescable(next_region_start as _)
            {
                next_region_start += PAGES_IN_CHUNK;
            }
            debug_assert!(next_region_start < freelist::MAX_UNITS as usize);
            if pages_freed == next_region_start - region_start {
                let start = sync.start;
                unsafe {
                    self.free_contiguous_chunk(
                        start + conversions::pages_to_bytes(region_start),
                        sync,
                    );
                }
            }
        }
    }
}