use std::sync::{Mutex, MutexGuard};

use super::layout::vm_layout::PAGES_IN_CHUNK;
use super::layout::VMMap;
use super::pageresource::{PRAllocFail, PRAllocResult};
use super::PageResource;
use crate::mmtk::MMAPPER;
use crate::util::address::Address;
use crate::util::alloc::embedded_meta_data::*;
use crate::util::conversions;
use crate::util::freelist;
use crate::util::freelist::FreeList;
use crate::util::heap::layout::vm_layout::*;
use crate::util::heap::layout::CreateFreeListResult;
use crate::util::heap::pageresource::CommonPageResource;
use crate::util::heap::space_descriptor::SpaceDescriptor;
use crate::util::opaque_pointer::*;
use crate::util::os::*;
use crate::util::raw_memory_freelist::RawMemoryFreeList;
use crate::vm::*;
use std::marker::PhantomData;

const UNINITIALIZED_WATER_MARK: i32 = -1;

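/// A page resource that tracks allocated and free pages with a [`FreeList`].
///
/// It serves both contiguous spaces (the whole range is given to the free list up
/// front) and discontiguous spaces (chunks are obtained from the global `VMMap` on
/// demand and handed back once they become completely free).
///
/// Illustrative sketch only (not a doctest); `MyVM`, `vm_map`, `descriptor` and `tls`
/// are assumed to be supplied by the VM binding:
/// ```ignore
/// let pr = FreeListPageResource::<MyVM>::new_discontiguous(vm_map);
/// let result = pr.alloc_pages(descriptor, 8, 8, tls);
/// ```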
pub struct FreeListPageResource<VM: VMBinding> {
    common: CommonPageResource,
    sync: Mutex<FreeListPageResourceSync>,
    _p: PhantomData<VM>,
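    /// If set, pages are mprotected with no access when released and unprotected again
    /// when re-allocated, so stray accesses to released pages fault immediately.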
    pub(crate) protect_memory_on_release: Option<MmapProtection>,
}

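// Safety: `Box<dyn FreeList>` carries no auto-trait bounds, so the compiler does not
// derive `Send`/`Sync` here. All mutable state lives behind the `sync` mutex.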
unsafe impl<VM: VMBinding> Send for FreeListPageResource<VM> {}
unsafe impl<VM: VMBinding> Sync for FreeListPageResource<VM> {}

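/// The mutable state of a [`FreeListPageResource`], guarded by its `sync` mutex.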
struct FreeListPageResourceSync {
    pub(crate) free_list: Box<dyn FreeList>,
    /// Number of pages currently sitting on the free list.
    pages_currently_on_freelist: usize,
    /// Start of the address range managed by this page resource.
    start: Address,
    /// Highest page offset handed out so far, or `UNINITIALIZED_WATER_MARK`.
    highwater_mark: i32,
}

impl<VM: VMBinding> PageResource<VM> for FreeListPageResource<VM> {
    fn common(&self) -> &CommonPageResource {
        &self.common
    }
    fn common_mut(&mut self) -> &mut CommonPageResource {
        &mut self.common
    }
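    /// Update `start` when the beginning of the discontiguous memory range changes.
    /// This is a no-op for contiguous page resources; the new start is aligned up to
    /// the free-list region granularity.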
    fn update_discontiguous_start(&mut self, start: Address) {
        if !self.common.contiguous {
            let sync = self.sync.get_mut().unwrap();
            sync.start = start.align_up(BYTES_IN_REGION);
        }
    }

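    /// Count the pages that could still be allocated: pages currently on the free
    /// list, plus chunks still available from the global map (discontiguous spaces),
    /// or the remaining extent of the space (growable 64-bit spaces).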
    fn get_available_physical_pages(&self) -> usize {
        let mut rtn = {
            let sync = self.sync.lock().unwrap();
            sync.pages_currently_on_freelist
        };

        if !self.common.contiguous {
            // Chunks still available from the global map can also back pages for us.
            let chunks: usize = self
                .common
                .vm_map
                .get_available_discontiguous_chunks()
                .saturating_sub(self.common.vm_map.get_chunk_consumer_count());
            rtn += chunks * PAGES_IN_CHUNK;
        } else if self.common.growable && cfg!(target_pointer_width = "64") {
            // A growable space on 64-bit can expand to the full per-space extent.
            rtn = vm_layout().pages_in_space64() - self.reserved_pages();
        }

        rtn
    }

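    /// Allocate `required_pages` pages from the free list. For growable resources, a
    /// failed allocation grows the space by whole chunks and retries. The result
    /// records whether a fresh chunk was touched (`new_chunk`), and pages are
    /// unprotected again if `protect_memory_on_release` is in use.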
    fn alloc_pages(
        &self,
        space_descriptor: SpaceDescriptor,
        reserved_pages: usize,
        required_pages: usize,
        tls: VMThread,
    ) -> Result<PRAllocResult, PRAllocFail> {
        let mut sync = self.sync.lock().unwrap();
        let mut new_chunk = false;
        let mut page_offset = sync.free_list.alloc(required_pages as _);
        if page_offset == freelist::FAILURE && self.common.growable {
            page_offset = unsafe {
                self.allocate_contiguous_chunks(space_descriptor, required_pages, &mut sync)
            };
            new_chunk = true;
        }

        if page_offset == freelist::FAILURE {
            return Result::Err(PRAllocFail);
        } else {
            sync.pages_currently_on_freelist -= required_pages;
            if page_offset > sync.highwater_mark {
                // If the new high water mark lies in a different region than the old
                // one (the XOR clears the common high bits), we touched a new chunk.
                if sync.highwater_mark == UNINITIALIZED_WATER_MARK
                    || (page_offset ^ sync.highwater_mark) > PAGES_IN_REGION as i32
                {
                    new_chunk = true;
                }
                sync.highwater_mark = page_offset;
            }
        }

        let rtn = sync.start + conversions::pages_to_bytes(page_offset as _);
        self.commit_pages(reserved_pages, required_pages, tls);
        if self.protect_memory_on_release.is_some() {
            if !new_chunk {
                // Spin until the address is mapped. Another thread may have grabbed a
                // brand-new chunk whose `Space::acquire()` has not mmapped it yet; pages
                // we then get from that chunk are not seen as a new chunk here, and
                // unprotecting an unmapped address would fail.
                while !new_chunk && !MMAPPER.is_mapped_address(rtn) {}
                self.munprotect(rtn, sync.free_list.size(page_offset as _) as _)
            } else if !self.common.contiguous && new_chunk {
                // A new discontiguous chunk that is already mapped was released and
                // protected by us earlier, so it still needs unprotecting. A chunk that
                // is not yet mapped must be left alone.
                if MMAPPER.is_mapped_address(rtn) {
                    self.munprotect(rtn, sync.free_list.size(page_offset as _) as _)
                }
            }
        };
        Result::Ok(PRAllocResult {
            start: rtn,
            pages: required_pages,
            new_chunk,
        })
    }
}

impl<VM: VMBinding> FreeListPageResource<VM> {
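    /// Create a page resource for a contiguous space covering `bytes` bytes from
    /// `start`. The parent free list may occupy the beginning of the space, in which
    /// case `space_displacement` pushes the usable range back. On 64-bit targets the
    /// resource is growable and the free list starts empty; otherwise every page is
    /// on the free list from the start.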
    pub fn new_contiguous(start: Address, bytes: usize, vm_map: &'static dyn VMMap) -> Self {
        let pages = conversions::bytes_to_pages_up(bytes);
        let CreateFreeListResult {
            free_list,
            space_displacement,
        } = vm_map.create_parent_freelist(start, pages, PAGES_IN_REGION as _);

        // If the free list is backed by raw memory at the start of the space, the
        // usable part of the space begins after it.
        let actual_start = start + space_displacement;
        debug!(
            " in new_contiguous: space_displacement = {:?}, actual_start = {}",
            space_displacement, actual_start
        );

        let growable = cfg!(target_pointer_width = "64");
        FreeListPageResource {
            common: CommonPageResource::new(true, growable, vm_map),
            sync: Mutex::new(FreeListPageResourceSync {
                free_list,
                pages_currently_on_freelist: if growable { 0 } else { pages },
                start: actual_start,
                highwater_mark: UNINITIALIZED_WATER_MARK,
            }),
            _p: PhantomData,
            protect_memory_on_release: None,
        }
    }

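    /// Create a page resource for a discontiguous space. It starts without any
    /// chunks; chunks are requested from the global `VMMap` as allocation demands.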
    pub fn new_discontiguous(vm_map: &'static dyn VMMap) -> Self {
        let start = vm_layout().available_start();

        let CreateFreeListResult {
            free_list,
            space_displacement,
        } = vm_map.create_freelist(start);

        debug_assert!(
            free_list.downcast_ref::<RawMemoryFreeList>().is_none(),
            "We can't allocate RawMemoryFreeList for discontiguous spaces."
        );

        // The free list is not backed by memory inside the space itself, so it
        // displaces nothing and the space starts at the available range.
        debug_assert_eq!(space_displacement, 0);
        debug!("new_discontiguous. start: {start}");

        FreeListPageResource {
            common: CommonPageResource::new(false, true, vm_map),
            sync: Mutex::new(FreeListPageResourceSync {
                free_list,
                pages_currently_on_freelist: 0,
                start,
                highwater_mark: UNINITIALIZED_WATER_MARK,
            }),
            _p: PhantomData,
            protect_memory_on_release: None,
        }
    }

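    /// Protect the given pages so that any access faults. Only legal when
    /// `protect_memory_on_release` is set.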
    fn mprotect(&self, start: Address, pages: usize) {
        assert!(self.protect_memory_on_release.is_some());
        if let Err(e) = OS::set_memory_access(
            start,
            conversions::pages_to_bytes(pages),
            MmapProtection::NoAccess,
        ) {
            panic!(
                "Failed at protecting memory (starting at {}): {:?}",
                start, e
            );
        }
    }

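    /// Restore the access permissions recorded in `protect_memory_on_release` on the
    /// given pages, undoing an earlier `mprotect`.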
    fn munprotect(&self, start: Address, pages: usize) {
        assert!(self.protect_memory_on_release.is_some());
        if let Err(e) = OS::set_memory_access(
            start,
            conversions::pages_to_bytes(pages),
            self.protect_memory_on_release.unwrap(),
        ) {
            panic!(
                "Failed at unprotecting memory (starting at {}): {:?}",
                start, e
            );
        }
    }

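    /// Allocate exactly one chunk from the global map. Unlike `alloc_pages`, this does
    /// not call `commit_pages`; committing is left to the caller.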
    pub(crate) fn allocate_one_chunk_no_commit(
        &self,
        space_descriptor: SpaceDescriptor,
    ) -> Result<PRAllocResult, PRAllocFail> {
        assert!(self.common.growable);
        let mut sync = self.sync.lock().unwrap();
        let page_offset =
            unsafe { self.allocate_contiguous_chunks(space_descriptor, PAGES_IN_CHUNK, &mut sync) };

        if page_offset == freelist::FAILURE {
            return Result::Err(PRAllocFail);
        } else {
            sync.pages_currently_on_freelist -= PAGES_IN_CHUNK;
            if page_offset > sync.highwater_mark {
                sync.highwater_mark = page_offset;
            }
        }

        let rtn = sync.start + conversions::pages_to_bytes(page_offset as _);
        Result::Ok(PRAllocResult {
            start: rtn,
            pages: PAGES_IN_CHUNK,
            new_chunk: true,
        })
    }

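    /// Grow the space by enough contiguous chunks to satisfy `pages`, put the new
    /// chunks on the free list, and retry the allocation. Returns the allocated page
    /// offset, or `freelist::FAILURE` if the space cannot grow. The caller must hold
    /// the `sync` lock, which the `MutexGuard` parameter enforces.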
    unsafe fn allocate_contiguous_chunks(
        &self,
        space_descriptor: SpaceDescriptor,
        pages: usize,
        sync: &mut MutexGuard<FreeListPageResourceSync>,
    ) -> i32 {
        let mut rtn = freelist::FAILURE;
        let required_chunks = crate::policy::space::required_chunks(pages);
        let region = self.common.grow_discontiguous_space(
            space_descriptor,
            required_chunks,
            Some(sync.free_list.as_mut()),
        );

        if !region.is_zero() {
            let region_start = conversions::bytes_to_pages_up(region - sync.start);
            let region_end = region_start + (required_chunks * PAGES_IN_CHUNK) - 1;
            // Mark the region boundaries so freed chunks never coalesce across regions.
            sync.free_list.set_uncoalescable(region_start as _);
            sync.free_list.set_uncoalescable(region_end as i32 + 1);
            for p in (region_start..region_end).step_by(PAGES_IN_CHUNK) {
                if p != region_start {
                    sync.free_list.clear_uncoalescable(p as _);
                }
                // Add the chunk to our free list.
                let liberated = sync.free_list.free(p as _, true);
                debug_assert!(liberated as usize == PAGES_IN_CHUNK + (p - region_start));
                sync.pages_currently_on_freelist += PAGES_IN_CHUNK;
            }
            // Retry the request that triggered this growth.
            rtn = sync.free_list.alloc(pages as _);
        }

        rtn
    }

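    /// Return an entire contiguous region of chunks to the global map. Each chunk is
    /// first re-allocated from the free list, so the free list no longer considers any
    /// of its pages free, before the region is released.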
    unsafe fn free_contiguous_chunk(&self, chunk: Address, sync: &mut FreeListPageResourceSync) {
        let num_chunks = self.vm_map().get_contiguous_region_chunks(chunk);
        let mut chunk_start = conversions::bytes_to_pages_up(chunk - sync.start);
        let chunk_end = chunk_start + (num_chunks * PAGES_IN_CHUNK);
        while chunk_start < chunk_end {
            sync.free_list.set_uncoalescable(chunk_start as _);
            // Take the whole chunk back off the free list before returning it.
            let tmp = sync
                .free_list
                .alloc_from_unit(PAGES_IN_CHUNK as _, chunk_start as _)
                as usize;
            debug_assert!(tmp == chunk_start);
            chunk_start += PAGES_IN_CHUNK;
            sync.pages_currently_on_freelist -= PAGES_IN_CHUNK;
        }
        self.common.release_discontiguous_chunks(chunk);
    }

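    /// Release a block of pages previously returned by `alloc_pages`, identified by
    /// its start address `first`; the block size is recovered from the free list. For
    /// discontiguous spaces, chunks that become entirely free are handed back to the
    /// global map.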
    pub fn release_pages(&self, first: Address) {
        debug_assert!(conversions::is_page_aligned(first));
        let mut sync = self.sync.lock().unwrap();
        let page_offset = conversions::bytes_to_pages_up(first - sync.start);
        let pages = sync.free_list.size(page_offset as _);
        debug_assert!(pages as usize <= self.common.accounting.get_committed_pages());

        if self.protect_memory_on_release.is_some() {
            self.mprotect(first, pages as _);
        }

        self.common.accounting.release(pages as _);
        let freed = sync.free_list.free(page_offset as _, true);
        sync.pages_currently_on_freelist += pages as usize;
        if !self.common.contiguous {
            self.release_free_chunks(first, freed as _, &mut sync);
        }
    }

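    /// After a free, check whether the chunks containing the freed pages are now
    /// entirely free, and if so return the whole contiguous run of free chunks to the
    /// global map.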
    fn release_free_chunks(
        &self,
        freed_page: Address,
        pages_freed: usize,
        sync: &mut FreeListPageResourceSync,
    ) {
        let page_offset = conversions::bytes_to_pages_up(freed_page - sync.start);

        if pages_freed % PAGES_IN_CHUNK == 0 {
            // A whole multiple of chunks was freed, so chunks may now be releasable.
            // Scan backwards and forwards over coalescable pages to find the extent of
            // the free run around the freed pages.
            let mut region_start = page_offset & !(PAGES_IN_CHUNK - 1);
            let mut next_region_start = region_start + PAGES_IN_CHUNK;
            while sync.free_list.is_coalescable(region_start as _) {
                region_start -= PAGES_IN_CHUNK;
            }
            while next_region_start < freelist::MAX_UNITS as usize
                && sync.free_list.is_coalescable(next_region_start as _)
            {
                next_region_start += PAGES_IN_CHUNK;
            }
            debug_assert!(next_region_start < freelist::MAX_UNITS as usize);
            if pages_freed == next_region_start - region_start {
                // The coalesced free block spans the whole run of chunks, so those
                // chunks are completely free: return them to the global map.
                let start = sync.start;
                unsafe {
                    self.free_contiguous_chunk(
                        start + conversions::pages_to_bytes(region_start),
                        sync,
                    );
                }
            }
        }
    }
}