// mmtk/util/heap/freelistpageresource.rs

1use std::sync::{Mutex, MutexGuard};
2
3use super::layout::vm_layout::PAGES_IN_CHUNK;
4use super::layout::VMMap;
5use super::pageresource::{PRAllocFail, PRAllocResult};
6use super::PageResource;
7use crate::mmtk::MMAPPER;
8use crate::util::address::Address;
9use crate::util::alloc::embedded_meta_data::*;
10use crate::util::conversions;
11use crate::util::freelist;
12use crate::util::freelist::FreeList;
13use crate::util::heap::layout::vm_layout::*;
14use crate::util::heap::layout::CreateFreeListResult;
15use crate::util::heap::pageresource::CommonPageResource;
16use crate::util::heap::space_descriptor::SpaceDescriptor;
17use crate::util::opaque_pointer::*;
18use crate::util::os::*;
19use crate::util::raw_memory_freelist::RawMemoryFreeList;
20use crate::vm::*;
21use std::marker::PhantomData;
22
23const UNINITIALIZED_WATER_MARK: i32 = -1;
24
/// A page resource that allocates and frees pages through a [`FreeList`].
///
/// Mutable allocation state lives in [`FreeListPageResourceSync`] behind a mutex; the
/// immutable/common parts are in `common`.
pub struct FreeListPageResource<VM: VMBinding> {
    // State shared by all page resource implementations (accounting, contiguity, growability).
    common: CommonPageResource,
    // All mutable free-list state, guarded by one mutex (see `FreeListPageResourceSync`).
    sync: Mutex<FreeListPageResourceSync>,
    // Ties the type to a specific VM binding without storing one.
    _p: PhantomData<VM>,
    /// Protect memory on release, and unprotect on re-allocate.
    pub(crate) protect_memory_on_release: Option<MmapProtection>,
}
32
// SAFETY: NOTE(review) — presumably sound because all mutable state is inside
// `Mutex<FreeListPageResourceSync>` and `PhantomData<VM>` carries no data; the manual impls
// appear to exist because `Box<dyn FreeList>` is not auto-`Send`/`Sync`. Confirm that every
// `FreeList` implementation used here is safe to share/move across threads.
unsafe impl<VM: VMBinding> Send for FreeListPageResource<VM> {}
unsafe impl<VM: VMBinding> Sync for FreeListPageResource<VM> {}
35
/// Mutable state of a [`FreeListPageResource`], accessed under `FreeListPageResource::sync`.
struct FreeListPageResourceSync {
    // The free list recording which pages of the space are available.
    pub(crate) free_list: Box<dyn FreeList>,
    // Number of pages currently available on `free_list` (kept in sync with alloc/free calls).
    pages_currently_on_freelist: usize,
    // Start address of the usable part of the space; page offsets in the free list are
    // relative to this address. May be updated later for discontiguous spaces.
    start: Address,
    // Highest page offset handed out so far, or UNINITIALIZED_WATER_MARK (-1) before the
    // first allocation.
    highwater_mark: i32,
}
42
43impl<VM: VMBinding> PageResource<VM> for FreeListPageResource<VM> {
44    fn common(&self) -> &CommonPageResource {
45        &self.common
46    }
47    fn common_mut(&mut self) -> &mut CommonPageResource {
48        &mut self.common
49    }
50    fn update_discontiguous_start(&mut self, start: Address) {
51        // Only discontiguous FreeListPageResource needs adjustment.
52        if !self.common.contiguous {
53            // The adjustment happens when we still have a `&mut MMTK`.
54            // We bypass the mutex lock by calling `get_mut`.
55            let sync = self.sync.get_mut().unwrap();
56            sync.start = start.align_up(BYTES_IN_REGION);
57        }
58    }
59
60    fn get_available_physical_pages(&self) -> usize {
61        let mut rtn = {
62            let sync = self.sync.lock().unwrap();
63            sync.pages_currently_on_freelist
64        };
65
66        if !self.common.contiguous {
67            let chunks: usize = self
68                .common
69                .vm_map
70                .get_available_discontiguous_chunks()
71                .saturating_sub(self.common.vm_map.get_chunk_consumer_count());
72            rtn += chunks * PAGES_IN_CHUNK;
73        } else if self.common.growable && cfg!(target_pointer_width = "64") {
74            rtn = vm_layout().pages_in_space64() - self.reserved_pages();
75        }
76
77        rtn
78    }
79
80    fn alloc_pages(
81        &self,
82        space_descriptor: SpaceDescriptor,
83        reserved_pages: usize,
84        required_pages: usize,
85        tls: VMThread,
86    ) -> Result<PRAllocResult, PRAllocFail> {
87        let mut sync = self.sync.lock().unwrap();
88        let mut new_chunk = false;
89        let mut page_offset = sync.free_list.alloc(required_pages as _);
90        if page_offset == freelist::FAILURE && self.common.growable {
91            page_offset = unsafe {
92                self.allocate_contiguous_chunks(space_descriptor, required_pages, &mut sync)
93            };
94            new_chunk = true;
95        }
96
97        if page_offset == freelist::FAILURE {
98            return Result::Err(PRAllocFail);
99        } else {
100            sync.pages_currently_on_freelist -= required_pages;
101            if page_offset > sync.highwater_mark {
102                if sync.highwater_mark == UNINITIALIZED_WATER_MARK
103                    || (page_offset ^ sync.highwater_mark) > PAGES_IN_REGION as i32
104                {
105                    new_chunk = true;
106                }
107                sync.highwater_mark = page_offset;
108            }
109        }
110
111        let rtn = sync.start + conversions::pages_to_bytes(page_offset as _);
112        // The meta-data portion of reserved Pages was committed above.
113        self.commit_pages(reserved_pages, required_pages, tls);
114        if self.protect_memory_on_release.is_some() {
115            if !new_chunk {
116                // This check is necessary to prevent us from mprotecting an address that is not yet mapped by mmapper.
117                // See https://github.com/mmtk/mmtk-core/issues/400.
118                // It is possible that one thread gets a new chunk, and returns from this function. However, the Space.acquire()
119                // has not yet call ensure_mapped() for it. So the chunk is not yet mmapped. At this point, if another thread calls
120                // this function, and get a few more pages from the same chunk, it is no longer seen as 'new_chunk', and we
121                // will try to munprotect on it. But the chunk may not yet be mapped.
122                //
123                // If we want to improve and get rid of this loop, we need to move this munprotect to anywhere after the ensure_mapped() call
124                // in Space.acquire(). We can either move it the option of 'protect_on_release' to space, or have a call to page resource
125                // after ensure_mapped(). However, I think this is sufficient given that this option is only used for PageProtect for debugging use.
126                while !new_chunk && !MMAPPER.is_mapped_address(rtn) {}
127                self.munprotect(rtn, sync.free_list.size(page_offset as _) as _)
128            } else if !self.common.contiguous && new_chunk {
129                // Don't unprotect if this is a new unmapped discontiguous chunk
130                // For a new mapped discontiguous chunk, this should previously be released and protected by us.
131                // We still need to unprotect it.
132                if MMAPPER.is_mapped_address(rtn) {
133                    self.munprotect(rtn, sync.free_list.size(page_offset as _) as _)
134                }
135            }
136        };
137        Result::Ok(PRAllocResult {
138            start: rtn,
139            pages: required_pages,
140            new_chunk,
141        })
142    }
143}
144
145impl<VM: VMBinding> FreeListPageResource<VM> {
146    pub fn new_contiguous(start: Address, bytes: usize, vm_map: &'static dyn VMMap) -> Self {
147        let pages = conversions::bytes_to_pages_up(bytes);
148        let CreateFreeListResult {
149            free_list,
150            space_displacement,
151        } = vm_map.create_parent_freelist(start, pages, PAGES_IN_REGION as _);
152
153        // If it is RawMemoryFreeList, it will occupy `space_displacement` bytes at the start of
154        // the space.  We add it to the start address.
155        let actual_start = start + space_displacement;
156        debug!(
157            "  in new_contiguous: space_displacement = {:?}, actual_start = {}",
158            space_displacement, actual_start
159        );
160
161        let growable = cfg!(target_pointer_width = "64");
162        FreeListPageResource {
163            common: CommonPageResource::new(true, growable, vm_map),
164            sync: Mutex::new(FreeListPageResourceSync {
165                free_list,
166                pages_currently_on_freelist: if growable { 0 } else { pages },
167                start: actual_start,
168                highwater_mark: UNINITIALIZED_WATER_MARK,
169            }),
170            _p: PhantomData,
171            protect_memory_on_release: None,
172        }
173    }
174
175    pub fn new_discontiguous(vm_map: &'static dyn VMMap) -> Self {
176        // This is a place-holder value that is used by neither `vm_map.create_freelist` nor the
177        // space.  The location of discontiguous spaces is not determined before all contiguous
178        // spaces are places, at which time the starting address of discontiguous spaces will be
179        // updated to the correct value.
180        let start = vm_layout().available_start();
181
182        let CreateFreeListResult {
183            free_list,
184            space_displacement,
185        } = vm_map.create_freelist(start);
186
187        // In theory, nothing prevents us from using `RawMemoryFreeList` for discontiguous spaces.
188        // But in the current implementation, only `Map32` supports discontiguous spaces, and
189        // `Map32` only uses `IntArrayFreeList`.
190        debug_assert!(
191            free_list.downcast_ref::<RawMemoryFreeList>().is_none(),
192            "We can't allocate RawMemoryFreeList for discontiguous spaces."
193        );
194
195        // Discontiguous free list page resources are only used by `Map32` which uses
196        // `IntArrayFreeList` exclusively.  It does not have space displacement.
197        debug_assert_eq!(space_displacement, 0);
198        debug!("new_discontiguous. start: {start})");
199
200        FreeListPageResource {
201            common: CommonPageResource::new(false, true, vm_map),
202            sync: Mutex::new(FreeListPageResourceSync {
203                free_list,
204                pages_currently_on_freelist: 0,
205                start,
206                highwater_mark: UNINITIALIZED_WATER_MARK,
207            }),
208            _p: PhantomData,
209            protect_memory_on_release: None,
210        }
211    }
212
213    /// Protect the memory
214    fn mprotect(&self, start: Address, pages: usize) {
215        // We may fail here for ENOMEM, especially in PageProtect plan.
216        // See: https://man7.org/linux/man-pages/man2/mprotect.2.html#ERRORS
217        // > Changing the protection of a memory region would result in
218        // > the total number of mappings with distinct attributes
219        // > (e.g., read versus read/write protection) exceeding the
220        // > allowed maximum.
221        assert!(self.protect_memory_on_release.is_some());
222        if let Err(e) = OS::set_memory_access(
223            start,
224            conversions::pages_to_bytes(pages),
225            MmapProtection::NoAccess,
226        ) {
227            panic!(
228                "Failed at protecting memory (starting at {}): {:?}",
229                start, e
230            );
231        }
232    }
233
234    /// Unprotect the memory
235    fn munprotect(&self, start: Address, pages: usize) {
236        assert!(self.protect_memory_on_release.is_some());
237        if let Err(e) = OS::set_memory_access(
238            start,
239            conversions::pages_to_bytes(pages),
240            self.protect_memory_on_release.unwrap(),
241        ) {
242            panic!(
243                "Failed at unprotecting memory (starting at {}): {:?}",
244                start, e
245            );
246        }
247    }
248
249    pub(crate) fn allocate_one_chunk_no_commit(
250        &self,
251        space_descriptor: SpaceDescriptor,
252    ) -> Result<PRAllocResult, PRAllocFail> {
253        assert!(self.common.growable);
254        // FIXME: We need a safe implementation
255        let mut sync = self.sync.lock().unwrap();
256        let page_offset =
257            unsafe { self.allocate_contiguous_chunks(space_descriptor, PAGES_IN_CHUNK, &mut sync) };
258
259        if page_offset == freelist::FAILURE {
260            return Result::Err(PRAllocFail);
261        } else {
262            sync.pages_currently_on_freelist -= PAGES_IN_CHUNK;
263            if page_offset > sync.highwater_mark {
264                sync.highwater_mark = page_offset;
265            }
266        }
267
268        let rtn = sync.start + conversions::pages_to_bytes(page_offset as _);
269        Result::Ok(PRAllocResult {
270            start: rtn,
271            pages: PAGES_IN_CHUNK,
272            new_chunk: true,
273        })
274    }
275
276    unsafe fn allocate_contiguous_chunks(
277        &self,
278        space_descriptor: SpaceDescriptor,
279        pages: usize,
280        sync: &mut MutexGuard<FreeListPageResourceSync>,
281    ) -> i32 {
282        let mut rtn = freelist::FAILURE;
283        let required_chunks = crate::policy::space::required_chunks(pages);
284        let region = self.common.grow_discontiguous_space(
285            space_descriptor,
286            required_chunks,
287            Some(sync.free_list.as_mut()),
288        );
289
290        if !region.is_zero() {
291            let region_start = conversions::bytes_to_pages_up(region - sync.start);
292            let region_end = region_start + (required_chunks * PAGES_IN_CHUNK) - 1;
293            sync.free_list.set_uncoalescable(region_start as _);
294            sync.free_list.set_uncoalescable(region_end as i32 + 1);
295            for p in (region_start..region_end).step_by(PAGES_IN_CHUNK) {
296                if p != region_start {
297                    sync.free_list.clear_uncoalescable(p as _);
298                }
299                let liberated = sync.free_list.free(p as _, true); // add chunk to our free list
300                debug_assert!(liberated as usize == PAGES_IN_CHUNK + (p - region_start));
301                sync.pages_currently_on_freelist += PAGES_IN_CHUNK;
302            }
303            rtn = sync.free_list.alloc(pages as _); // re-do the request which triggered this call
304        }
305
306        rtn
307    }
308
309    unsafe fn free_contiguous_chunk(&self, chunk: Address, sync: &mut FreeListPageResourceSync) {
310        let num_chunks = self.vm_map().get_contiguous_region_chunks(chunk);
311        /* nail down all pages associated with the chunk, so it is no longer on our free list */
312        let mut chunk_start = conversions::bytes_to_pages_up(chunk - sync.start);
313        let chunk_end = chunk_start + (num_chunks * PAGES_IN_CHUNK);
314        while chunk_start < chunk_end {
315            sync.free_list.set_uncoalescable(chunk_start as _);
316            let tmp = sync
317                .free_list
318                .alloc_from_unit(PAGES_IN_CHUNK as _, chunk_start as _)
319                as usize; // then alloc the entire chunk
320            debug_assert!(tmp == chunk_start);
321            chunk_start += PAGES_IN_CHUNK;
322            sync.pages_currently_on_freelist -= PAGES_IN_CHUNK;
323        }
324        /* now return the address space associated with the chunk for global reuse */
325
326        self.common.release_discontiguous_chunks(chunk);
327    }
328
329    /// Release pages previously allocated by `alloc_pages`.
330    ///
331    /// Warning: This method acquires the mutex `self.sync`.  If multiple threads release pages
332    /// concurrently, the lock contention will become a performance bottleneck.  This is especially
333    /// problematic for plans that sweep objects in bulk in the `Release` stage.  Spaces except the
334    /// large object space are recommended to use [`BlockPageResource`] whenever possible.
335    ///
336    /// [`BlockPageResource`]: crate::util::heap::blockpageresource::BlockPageResource
337    pub fn release_pages(&self, first: Address) {
338        debug_assert!(conversions::is_page_aligned(first));
339        let mut sync = self.sync.lock().unwrap();
340        let page_offset = conversions::bytes_to_pages_up(first - sync.start);
341        let pages = sync.free_list.size(page_offset as _);
342        // if (VM.config.ZERO_PAGES_ON_RELEASE)
343        //     VM.memory.zero(false, first, Conversions.pagesToBytes(pages));
344        debug_assert!(pages as usize <= self.common.accounting.get_committed_pages());
345
346        if self.protect_memory_on_release.is_some() {
347            self.mprotect(first, pages as _);
348        }
349
350        self.common.accounting.release(pages as _);
351        let freed = sync.free_list.free(page_offset as _, true);
352        sync.pages_currently_on_freelist += pages as usize;
353        if !self.common.contiguous {
354            // only discontiguous spaces use chunks
355            self.release_free_chunks(first, freed as _, &mut sync);
356        }
357    }
358
359    fn release_free_chunks(
360        &self,
361        freed_page: Address,
362        pages_freed: usize,
363        sync: &mut FreeListPageResourceSync,
364    ) {
365        let page_offset = conversions::bytes_to_pages_up(freed_page - sync.start);
366
367        // may be multiple chunks
368        if pages_freed % PAGES_IN_CHUNK == 0 {
369            // necessary, but not sufficient condition
370            /* grow a region of chunks, starting with the chunk containing the freed page */
371            let mut region_start = page_offset & !(PAGES_IN_CHUNK - 1);
372            let mut next_region_start = region_start + PAGES_IN_CHUNK;
373            /* now try to grow (end point pages are marked as non-coalescing) */
374            while sync.free_list.is_coalescable(region_start as _) {
375                // region_start is guaranteed to be positive. Otherwise this line will fail due to subtraction overflow.
376                region_start -= PAGES_IN_CHUNK;
377            }
378            while next_region_start < freelist::MAX_UNITS as usize
379                && sync.free_list.is_coalescable(next_region_start as _)
380            {
381                next_region_start += PAGES_IN_CHUNK;
382            }
383            debug_assert!(next_region_start < freelist::MAX_UNITS as usize);
384            if pages_freed == next_region_start - region_start {
385                let start = sync.start;
386                unsafe {
387                    self.free_contiguous_chunk(
388                        start + conversions::pages_to_bytes(region_start),
389                        sync,
390                    );
391                }
392            }
393        }
394    }
395}