mmtk/util/heap/freelistpageresource.rs

use std::sync::{Mutex, MutexGuard};

use super::layout::vm_layout::PAGES_IN_CHUNK;
use super::layout::VMMap;
use super::pageresource::{PRAllocFail, PRAllocResult};
use super::PageResource;
use crate::mmtk::MMAPPER;
use crate::util::address::Address;
use crate::util::alloc::embedded_meta_data::*;
use crate::util::conversions;
use crate::util::freelist;
use crate::util::freelist::FreeList;
use crate::util::heap::layout::vm_layout::*;
use crate::util::heap::layout::CreateFreeListResult;
use crate::util::heap::pageresource::CommonPageResource;
use crate::util::heap::space_descriptor::SpaceDescriptor;
use crate::util::memory;
use crate::util::opaque_pointer::*;
use crate::util::raw_memory_freelist::RawMemoryFreeList;
use crate::vm::*;
use std::marker::PhantomData;

const UNINITIALIZED_WATER_MARK: i32 = -1;

pub struct FreeListPageResource<VM: VMBinding> {
    common: CommonPageResource,
    sync: Mutex<FreeListPageResourceSync>,
    _p: PhantomData<VM>,
    /// Protect memory on release, and unprotect on re-allocate.
    pub(crate) protect_memory_on_release: Option<memory::MmapProtection>,
}

unsafe impl<VM: VMBinding> Send for FreeListPageResource<VM> {}
unsafe impl<VM: VMBinding> Sync for FreeListPageResource<VM> {}

struct FreeListPageResourceSync {
    pub(crate) free_list: Box<dyn FreeList>,
    pages_currently_on_freelist: usize,
    start: Address,
    highwater_mark: i32,
}

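// A minimal standalone sketch (not part of the original source) of how free-list
// units relate to addresses: the free list hands out *page offsets* relative to
// `FreeListPageResourceSync::start`, and `alloc_pages` turns an offset back into
// an address with `start + pages_to_bytes(offset)`.  The 4 KB page size and the
// start address below are assumptions for illustration only; the real values come
// from `vm_layout` and the space layout.
#[cfg(test)]
mod page_offset_sketch {
    const LOG_BYTES_IN_PAGE: u32 = 12; // assumed 4 KB pages

    fn pages_to_bytes(pages: u64) -> u64 {
        pages << LOG_BYTES_IN_PAGE
    }

    #[test]
    fn offset_to_address() {
        let start: u64 = 0x6800_0000_0000; // assumed space start address
        let page_offset: u64 = 3; // unit handed out by the free list
        assert_eq!(start + pages_to_bytes(page_offset), 0x6800_0000_3000);
    }
}
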
impl<VM: VMBinding> PageResource<VM> for FreeListPageResource<VM> {
    fn common(&self) -> &CommonPageResource {
        &self.common
    }
    fn common_mut(&mut self) -> &mut CommonPageResource {
        &mut self.common
    }
    fn update_discontiguous_start(&mut self, start: Address) {
        // Only discontiguous FreeListPageResource needs adjustment.
        if !self.common.contiguous {
            // The adjustment happens when we still have a `&mut MMTK`.
            // We bypass the mutex lock by calling `get_mut`.
            let sync = self.sync.get_mut().unwrap();
            sync.start = start.align_up(BYTES_IN_REGION);
        }
    }

    fn get_available_physical_pages(&self) -> usize {
        let mut rtn = {
            let sync = self.sync.lock().unwrap();
            sync.pages_currently_on_freelist
        };

        if !self.common.contiguous {
            let chunks: usize = self
                .common
                .vm_map
                .get_available_discontiguous_chunks()
                .saturating_sub(self.common.vm_map.get_chunk_consumer_count());
            rtn += chunks * PAGES_IN_CHUNK;
        } else if self.common.growable && cfg!(target_pointer_width = "64") {
            rtn = vm_layout().pages_in_space64() - self.reserved_pages();
        }

        rtn
    }

    fn alloc_pages(
        &self,
        space_descriptor: SpaceDescriptor,
        reserved_pages: usize,
        required_pages: usize,
        tls: VMThread,
    ) -> Result<PRAllocResult, PRAllocFail> {
        let mut sync = self.sync.lock().unwrap();
        let mut new_chunk = false;
        let mut page_offset = sync.free_list.alloc(required_pages as _);
        if page_offset == freelist::FAILURE && self.common.growable {
            page_offset = unsafe {
                self.allocate_contiguous_chunks(space_descriptor, required_pages, &mut sync)
            };
            new_chunk = true;
        }

        if page_offset == freelist::FAILURE {
            return Result::Err(PRAllocFail);
        } else {
            sync.pages_currently_on_freelist -= required_pages;
            if page_offset > sync.highwater_mark {
                // This is the first allocation, or the new offset lies in a different
                // region than the previous high-water mark (detected by XOR-ing the two
                // page offsets), so treat it as a new chunk.
                if sync.highwater_mark == UNINITIALIZED_WATER_MARK
                    || (page_offset ^ sync.highwater_mark) > PAGES_IN_REGION as i32
                {
                    new_chunk = true;
                }
                sync.highwater_mark = page_offset;
            }
        }

        let rtn = sync.start + conversions::pages_to_bytes(page_offset as _);
        // The meta-data portion of reserved pages was committed above.
        self.commit_pages(reserved_pages, required_pages, tls);
        if self.protect_memory_on_release.is_some() {
            if !new_chunk {
                // This check is necessary to prevent us from mprotecting an address that is not yet mapped by mmapper.
                // See https://github.com/mmtk/mmtk-core/issues/400.
                // It is possible that one thread gets a new chunk and returns from this function before Space.acquire()
                // has called ensure_mapped() for it, so the chunk is not yet mmapped. At this point, if another thread
                // calls this function and gets a few more pages from the same chunk, the allocation is no longer seen as
                // a 'new_chunk', and we will try to munprotect it even though the chunk may not yet be mapped.
                //
                // If we want to get rid of this loop, we need to move this munprotect to somewhere after the
                // ensure_mapped() call in Space.acquire(). We could either move the 'protect_memory_on_release' option
                // to the space, or have the space call back into the page resource after ensure_mapped(). However, this
                // is sufficient given that this option is only used by PageProtect for debugging.
                while !new_chunk && !MMAPPER.is_mapped_address(rtn) {}
                self.munprotect(rtn, sync.free_list.size(page_offset as _) as _)
            } else if !self.common.contiguous && new_chunk {
                // Don't unprotect if this is a new, unmapped discontiguous chunk.
                // A previously mapped discontiguous chunk must have been released and protected by us earlier,
                // so we still need to unprotect it.
                if MMAPPER.is_mapped_address(rtn) {
                    self.munprotect(rtn, sync.free_list.size(page_offset as _) as _)
                }
            }
        };
        Result::Ok(PRAllocResult {
            start: rtn,
            pages: required_pages,
            new_chunk,
        })
    }
}
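
// A minimal standalone sketch (not part of the original source) of the XOR test used
// in `alloc_pages` above to decide whether an allocation above the high-water mark
// landed in a new region.  With a power-of-two region size, two page offsets in the
// same aligned region agree on all bits at or above log2(PAGES_IN_REGION), so their
// XOR stays below PAGES_IN_REGION; an offset in a different region flips a higher bit
// and makes the XOR large.  The value 512 below is an assumption for illustration;
// the real constant comes from `embedded_meta_data`.
#[cfg(test)]
mod highwater_mark_sketch {
    const PAGES_IN_REGION: i32 = 512; // assumed power-of-two region size, in pages

    fn in_new_region(page_offset: i32, highwater_mark: i32) -> bool {
        (page_offset ^ highwater_mark) > PAGES_IN_REGION
    }

    #[test]
    fn same_region() {
        // Offsets 10 and 300 both fall in the region covering pages [0, 512).
        assert!(!in_new_region(300, 10));
    }

    #[test]
    fn different_region() {
        // Offset 1030 falls in the region covering pages [1024, 1536).
        assert!(in_new_region(1030, 10));
    }
}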

impl<VM: VMBinding> FreeListPageResource<VM> {
    pub fn new_contiguous(start: Address, bytes: usize, vm_map: &'static dyn VMMap) -> Self {
        let pages = conversions::bytes_to_pages_up(bytes);
        let CreateFreeListResult {
            free_list,
            space_displacement,
        } = vm_map.create_parent_freelist(start, pages, PAGES_IN_REGION as _);

        // If it is RawMemoryFreeList, it will occupy `space_displacement` bytes at the start of
        // the space.  We add it to the start address.
        let actual_start = start + space_displacement;
        debug!(
            "  in new_contiguous: space_displacement = {:?}, actual_start = {}",
            space_displacement, actual_start
        );

        let growable = cfg!(target_pointer_width = "64");
        FreeListPageResource {
            common: CommonPageResource::new(true, growable, vm_map),
            sync: Mutex::new(FreeListPageResourceSync {
                free_list,
                pages_currently_on_freelist: if growable { 0 } else { pages },
                start: actual_start,
                highwater_mark: UNINITIALIZED_WATER_MARK,
            }),
            _p: PhantomData,
            protect_memory_on_release: None,
        }
    }

    pub fn new_discontiguous(vm_map: &'static dyn VMMap) -> Self {
        // This is a place-holder value that is used by neither `vm_map.create_freelist` nor the
        // space.  The location of discontiguous spaces is not determined until all contiguous
        // spaces are placed, at which time the starting address of discontiguous spaces will be
        // updated to the correct value.
        let start = vm_layout().available_start();

        let CreateFreeListResult {
            free_list,
            space_displacement,
        } = vm_map.create_freelist(start);

        // In theory, nothing prevents us from using `RawMemoryFreeList` for discontiguous spaces.
        // But in the current implementation, only `Map32` supports discontiguous spaces, and
        // `Map32` only uses `IntArrayFreeList`.
        debug_assert!(
            free_list.downcast_ref::<RawMemoryFreeList>().is_none(),
            "We can't allocate RawMemoryFreeList for discontiguous spaces."
        );

        // Discontiguous free list page resources are only used by `Map32`, which uses
        // `IntArrayFreeList` exclusively.  It does not have space displacement.
        debug_assert_eq!(space_displacement, 0);
        debug!("new_discontiguous. start: {start}");

        FreeListPageResource {
            common: CommonPageResource::new(false, true, vm_map),
            sync: Mutex::new(FreeListPageResourceSync {
                free_list,
                pages_currently_on_freelist: 0,
                start,
                highwater_mark: UNINITIALIZED_WATER_MARK,
            }),
            _p: PhantomData,
            protect_memory_on_release: None,
        }
    }

    /// Protect the memory
    fn mprotect(&self, start: Address, pages: usize) {
        // We may fail here for ENOMEM, especially in the PageProtect plan.
        // See: https://man7.org/linux/man-pages/man2/mprotect.2.html#ERRORS
        // > Changing the protection of a memory region would result in
        // > the total number of mappings with distinct attributes
        // > (e.g., read versus read/write protection) exceeding the
        // > allowed maximum.
        assert!(self.protect_memory_on_release.is_some());
        if let Err(e) = memory::mprotect(start, conversions::pages_to_bytes(pages)) {
            panic!(
                "Failed at protecting memory (starting at {}): {:?}",
                start, e
            );
        }
    }

    /// Unprotect the memory
    fn munprotect(&self, start: Address, pages: usize) {
        assert!(self.protect_memory_on_release.is_some());
        if let Err(e) = memory::munprotect(
            start,
            conversions::pages_to_bytes(pages),
            self.protect_memory_on_release.unwrap(),
        ) {
            panic!(
                "Failed at unprotecting memory (starting at {}): {:?}",
                start, e
            );
        }
    }

    pub(crate) fn allocate_one_chunk_no_commit(
        &self,
        space_descriptor: SpaceDescriptor,
    ) -> Result<PRAllocResult, PRAllocFail> {
        assert!(self.common.growable);
        // FIXME: We need a safe implementation
        let mut sync = self.sync.lock().unwrap();
        let page_offset =
            unsafe { self.allocate_contiguous_chunks(space_descriptor, PAGES_IN_CHUNK, &mut sync) };

        if page_offset == freelist::FAILURE {
            return Result::Err(PRAllocFail);
        } else {
            sync.pages_currently_on_freelist -= PAGES_IN_CHUNK;
            if page_offset > sync.highwater_mark {
                sync.highwater_mark = page_offset;
            }
        }

        let rtn = sync.start + conversions::pages_to_bytes(page_offset as _);
        Result::Ok(PRAllocResult {
            start: rtn,
            pages: PAGES_IN_CHUNK,
            new_chunk: true,
        })
    }

    unsafe fn allocate_contiguous_chunks(
        &self,
        space_descriptor: SpaceDescriptor,
        pages: usize,
        sync: &mut MutexGuard<FreeListPageResourceSync>,
    ) -> i32 {
        let mut rtn = freelist::FAILURE;
        let required_chunks = crate::policy::space::required_chunks(pages);
        let region = self.common.grow_discontiguous_space(
            space_descriptor,
            required_chunks,
            Some(sync.free_list.as_mut()),
        );

        if !region.is_zero() {
            let region_start = conversions::bytes_to_pages_up(region - sync.start);
            let region_end = region_start + (required_chunks * PAGES_IN_CHUNK) - 1;
            sync.free_list.set_uncoalescable(region_start as _);
            sync.free_list.set_uncoalescable(region_end as i32 + 1);
            for p in (region_start..region_end).step_by(PAGES_IN_CHUNK) {
                if p != region_start {
                    sync.free_list.clear_uncoalescable(p as _);
                }
                let liberated = sync.free_list.free(p as _, true); // add chunk to our free list
                // Each chunk coalesces with the chunks freed before it, so the freed block
                // grows by PAGES_IN_CHUNK on every iteration.
                debug_assert!(liberated as usize == PAGES_IN_CHUNK + (p - region_start));
                sync.pages_currently_on_freelist += PAGES_IN_CHUNK;
            }
            rtn = sync.free_list.alloc(pages as _); // re-do the request which triggered this call
        }

        rtn
    }

    unsafe fn free_contiguous_chunk(&self, chunk: Address, sync: &mut FreeListPageResourceSync) {
        let num_chunks = self.vm_map().get_contiguous_region_chunks(chunk);
        /* nail down all pages associated with the chunk, so it is no longer on our free list */
        let mut chunk_start = conversions::bytes_to_pages_up(chunk - sync.start);
        let chunk_end = chunk_start + (num_chunks * PAGES_IN_CHUNK);
        while chunk_start < chunk_end {
            sync.free_list.set_uncoalescable(chunk_start as _);
            let tmp = sync
                .free_list
                .alloc_from_unit(PAGES_IN_CHUNK as _, chunk_start as _)
                as usize; // then alloc the entire chunk
            debug_assert!(tmp == chunk_start);
            chunk_start += PAGES_IN_CHUNK;
            sync.pages_currently_on_freelist -= PAGES_IN_CHUNK;
        }
        /* now return the address space associated with the chunk for global reuse */

        self.common.release_discontiguous_chunks(chunk);
    }

    /// Release pages previously allocated by `alloc_pages`.
    ///
    /// Warning: This method acquires the mutex `self.sync`.  If multiple threads release pages
    /// concurrently, the lock contention will become a performance bottleneck.  This is especially
    /// problematic for plans that sweep objects in bulk in the `Release` stage.  Spaces other than
    /// the large object space are recommended to use [`BlockPageResource`] whenever possible.
    ///
    /// [`BlockPageResource`]: crate::util::heap::blockpageresource::BlockPageResource
    pub fn release_pages(&self, first: Address) {
        debug_assert!(conversions::is_page_aligned(first));
        let mut sync = self.sync.lock().unwrap();
        let page_offset = conversions::bytes_to_pages_up(first - sync.start);
        let pages = sync.free_list.size(page_offset as _);
        // if (VM.config.ZERO_PAGES_ON_RELEASE)
        //     VM.memory.zero(false, first, Conversions.pagesToBytes(pages));
        debug_assert!(pages as usize <= self.common.accounting.get_committed_pages());

        if self.protect_memory_on_release.is_some() {
            self.mprotect(first, pages as _);
        }

        self.common.accounting.release(pages as _);
        let freed = sync.free_list.free(page_offset as _, true);
        sync.pages_currently_on_freelist += pages as usize;
        if !self.common.contiguous {
            // only discontiguous spaces use chunks
            self.release_free_chunks(first, freed as _, &mut sync);
        }
    }

    fn release_free_chunks(
        &self,
        freed_page: Address,
        pages_freed: usize,
        sync: &mut FreeListPageResourceSync,
    ) {
        let page_offset = conversions::bytes_to_pages_up(freed_page - sync.start);

        // may be multiple chunks
        if pages_freed % PAGES_IN_CHUNK == 0 {
            // necessary, but not sufficient condition
            /* grow a region of chunks, starting with the chunk containing the freed page */
            let mut region_start = page_offset & !(PAGES_IN_CHUNK - 1);
            let mut next_region_start = region_start + PAGES_IN_CHUNK;
            /* now try to grow (end point pages are marked as non-coalescing) */
            while sync.free_list.is_coalescable(region_start as _) {
                // region_start is guaranteed to be positive. Otherwise this line will fail due to subtraction overflow.
                region_start -= PAGES_IN_CHUNK;
            }
            while next_region_start < freelist::MAX_UNITS as usize
                && sync.free_list.is_coalescable(next_region_start as _)
            {
                next_region_start += PAGES_IN_CHUNK;
            }
            debug_assert!(next_region_start < freelist::MAX_UNITS as usize);
            if pages_freed == next_region_start - region_start {
                let start = sync.start;
                unsafe {
                    self.free_contiguous_chunk(
                        start + conversions::pages_to_bytes(region_start),
                        sync,
                    );
                }
            }
        }
    }
}
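
// A minimal standalone sketch (not part of the original source) of the chunk-alignment
// arithmetic used in `release_free_chunks` above: with a power-of-two PAGES_IN_CHUNK,
// `page_offset & !(PAGES_IN_CHUNK - 1)` rounds a page offset down to the start of the
// chunk that contains it, and the next candidate region starts one chunk later.  The
// value 1024 below (4 MB chunks of 4 KB pages) is an assumption for illustration; the
// real constant comes from `vm_layout`.
#[cfg(test)]
mod chunk_alignment_sketch {
    const PAGES_IN_CHUNK: usize = 1024; // assumed power-of-two chunk size, in pages

    /// Round a page offset down to the first page of its chunk.
    fn chunk_align_down(page_offset: usize) -> usize {
        page_offset & !(PAGES_IN_CHUNK - 1)
    }

    #[test]
    fn align_down_within_chunk() {
        // Page 2500 lies in the third chunk, which starts at page 2048.
        let region_start = chunk_align_down(2500);
        assert_eq!(region_start, 2048);
        // The next candidate region starts one chunk later.
        assert_eq!(region_start + PAGES_IN_CHUNK, 3072);
    }

    #[test]
    fn chunk_starts_are_fixed_points() {
        assert_eq!(chunk_align_down(1024), 1024);
        assert_eq!(chunk_align_down(0), 0);
    }
}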