use std::sync::{Mutex, MutexGuard};

use super::layout::vm_layout::PAGES_IN_CHUNK;
use super::layout::VMMap;
use super::pageresource::{PRAllocFail, PRAllocResult};
use super::PageResource;
use crate::mmtk::MMAPPER;
use crate::util::address::Address;
use crate::util::alloc::embedded_meta_data::*;
use crate::util::conversions;
use crate::util::freelist;
use crate::util::freelist::FreeList;
use crate::util::heap::layout::vm_layout::*;
use crate::util::heap::layout::CreateFreeListResult;
use crate::util::heap::pageresource::CommonPageResource;
use crate::util::heap::space_descriptor::SpaceDescriptor;
use crate::util::memory;
use crate::util::opaque_pointer::*;
use crate::util::raw_memory_freelist::RawMemoryFreeList;
use crate::vm::*;
use std::marker::PhantomData;

const UNINITIALIZED_WATER_MARK: i32 = -1;

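/// A page resource that tracks the pages of a space with a [`FreeList`], allocating and
/// releasing memory at page granularity.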
pub struct FreeListPageResource<VM: VMBinding> {
    common: CommonPageResource,
    sync: Mutex<FreeListPageResourceSync>,
    _p: PhantomData<VM>,
    /// Protect memory on release, and unprotect on re-allocate.
    pub(crate) protect_memory_on_release: Option<memory::MmapProtection>,
}

unsafe impl<VM: VMBinding> Send for FreeListPageResource<VM> {}
unsafe impl<VM: VMBinding> Sync for FreeListPageResource<VM> {}

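/// The mutable state of a [`FreeListPageResource`].  It is protected by the mutex
/// `FreeListPageResource::sync`.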
struct FreeListPageResourceSync {
    pub(crate) free_list: Box<dyn FreeList>,
    pages_currently_on_freelist: usize,
    start: Address,
    highwater_mark: i32,
}

impl<VM: VMBinding> PageResource<VM> for FreeListPageResource<VM> {
    fn common(&self) -> &CommonPageResource {
        &self.common
    }
    fn common_mut(&mut self) -> &mut CommonPageResource {
        &mut self.common
    }
    fn update_discontiguous_start(&mut self, start: Address) {
        // Only discontiguous FreeListPageResource needs adjustment.
        if !self.common.contiguous {
            // The adjustment happens when we still have a `&mut MMTK`.
            // We bypass the mutex lock by calling `get_mut`.
            let sync = self.sync.get_mut().unwrap();
            sync.start = start.align_up(BYTES_IN_REGION);
        }
    }

    fn get_available_physical_pages(&self) -> usize {
        let mut rtn = {
            let sync = self.sync.lock().unwrap();
            sync.pages_currently_on_freelist
        };

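        // Besides the pages already on the free list, a discontiguous space can also use chunks
        // still available in the global chunk pool, while a growable contiguous space on 64-bit
        // is bounded only by the extent of the space itself.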
        if !self.common.contiguous {
            let chunks: usize = self
                .common
                .vm_map
                .get_available_discontiguous_chunks()
                .saturating_sub(self.common.vm_map.get_chunk_consumer_count());
            rtn += chunks * PAGES_IN_CHUNK;
        } else if self.common.growable && cfg!(target_pointer_width = "64") {
            rtn = vm_layout().pages_in_space64() - self.reserved_pages();
        }

        rtn
    }

    fn alloc_pages(
        &self,
        space_descriptor: SpaceDescriptor,
        reserved_pages: usize,
        required_pages: usize,
        tls: VMThread,
    ) -> Result<PRAllocResult, PRAllocFail> {
        let mut sync = self.sync.lock().unwrap();
        let mut new_chunk = false;
        let mut page_offset = sync.free_list.alloc(required_pages as _);
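        // `alloc` returns the offset (in pages) of the allocation from the start of the space, or
        // `freelist::FAILURE`.  If the free list cannot satisfy the request and the space is
        // growable, acquire more chunks and retry.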
        if page_offset == freelist::FAILURE && self.common.growable {
            page_offset = unsafe {
                self.allocate_contiguous_chunks(space_descriptor, required_pages, &mut sync)
            };
            new_chunk = true;
        }

        if page_offset == freelist::FAILURE {
            return Result::Err(PRAllocFail);
        } else {
            sync.pages_currently_on_freelist -= required_pages;
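            // Track the highest page offset handed out so far.  If this is the first allocation,
            // or the offset lies in a different region from the previous high-water mark (the XOR
            // test below), report the allocation as touching a new chunk.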
            if page_offset > sync.highwater_mark {
                if sync.highwater_mark == UNINITIALIZED_WATER_MARK
                    || (page_offset ^ sync.highwater_mark) > PAGES_IN_REGION as i32
                {
                    new_chunk = true;
                }
                sync.highwater_mark = page_offset;
            }
        }

        let rtn = sync.start + conversions::pages_to_bytes(page_offset as _);
        // The meta-data portion of the reserved pages was committed above.
        self.commit_pages(reserved_pages, required_pages, tls);
        if self.protect_memory_on_release.is_some() {
            if !new_chunk {
                // This check is necessary to prevent us from mprotecting an address that is not yet mapped by the mmapper.
                // See https://github.com/mmtk/mmtk-core/issues/400.
                // It is possible that one thread gets a new chunk and returns from this function before Space.acquire()
                // has called ensure_mapped() for it, so the chunk is not yet mmapped. If another thread then calls
                // this function and gets a few more pages from the same chunk, the allocation is no longer seen as a
                // 'new_chunk', and we would try to munprotect memory that may not yet be mapped.
                //
                // If we want to get rid of this spin loop, we need to move this munprotect to somewhere after the
                // ensure_mapped() call in Space.acquire(). We could either move the 'protect_memory_on_release' option
                // to the space, or add a call into the page resource after ensure_mapped(). However, this is sufficient
                // for now, given that this option is only used by the PageProtect plan for debugging.
                while !new_chunk && !MMAPPER.is_mapped_address(rtn) {}
                self.munprotect(rtn, sync.free_list.size(page_offset as _) as _)
            } else if !self.common.contiguous && new_chunk {
                // Don't unprotect if this is a new, still-unmapped discontiguous chunk.
                // A new but already-mapped discontiguous chunk must have been released and protected by us earlier,
                // so we still need to unprotect it.
                if MMAPPER.is_mapped_address(rtn) {
                    self.munprotect(rtn, sync.free_list.size(page_offset as _) as _)
                }
            }
        };
        Result::Ok(PRAllocResult {
            start: rtn,
            pages: required_pages,
            new_chunk,
        })
    }
}

impl<VM: VMBinding> FreeListPageResource<VM> {
    pub fn new_contiguous(start: Address, bytes: usize, vm_map: &'static dyn VMMap) -> Self {
        let pages = conversions::bytes_to_pages_up(bytes);
        let CreateFreeListResult {
            free_list,
            space_displacement,
        } = vm_map.create_parent_freelist(start, pages, PAGES_IN_REGION as _);

        // If the free list is a RawMemoryFreeList, it occupies `space_displacement` bytes at the
        // start of the space, so we add that displacement to the start address.
        let actual_start = start + space_displacement;
        debug!(
            "  in new_contiguous: space_displacement = {:?}, actual_start = {}",
            space_displacement, actual_start
        );

        let growable = cfg!(target_pointer_width = "64");
        FreeListPageResource {
            common: CommonPageResource::new(true, growable, vm_map),
            sync: Mutex::new(FreeListPageResourceSync {
                free_list,
                pages_currently_on_freelist: if growable { 0 } else { pages },
                start: actual_start,
                highwater_mark: UNINITIALIZED_WATER_MARK,
            }),
            _p: PhantomData,
            protect_memory_on_release: None,
        }
    }

    pub fn new_discontiguous(vm_map: &'static dyn VMMap) -> Self {
        // This is a placeholder value that is used by neither `vm_map.create_freelist` nor the
        // space.  The location of discontiguous spaces is not determined before all contiguous
        // spaces are placed, at which time the starting address of discontiguous spaces will be
        // updated to the correct value.
        let start = vm_layout().available_start();

        let CreateFreeListResult {
            free_list,
            space_displacement,
        } = vm_map.create_freelist(start);

        // In theory, nothing prevents us from using `RawMemoryFreeList` for discontiguous spaces.
        // But in the current implementation, only `Map32` supports discontiguous spaces, and
        // `Map32` only uses `IntArrayFreeList`.
        debug_assert!(
            free_list.downcast_ref::<RawMemoryFreeList>().is_none(),
            "We can't allocate RawMemoryFreeList for discontiguous spaces."
        );

        // Discontiguous free list page resources are only used by `Map32`, which uses
        // `IntArrayFreeList` exclusively, and `IntArrayFreeList` does not introduce any space displacement.
        debug_assert_eq!(space_displacement, 0);
        debug!("new_discontiguous. start: {start}");

        FreeListPageResource {
            common: CommonPageResource::new(false, true, vm_map),
            sync: Mutex::new(FreeListPageResourceSync {
                free_list,
                pages_currently_on_freelist: 0,
                start,
                highwater_mark: UNINITIALIZED_WATER_MARK,
            }),
            _p: PhantomData,
            protect_memory_on_release: None,
        }
    }

    /// Protect the memory
    fn mprotect(&self, start: Address, pages: usize) {
        // We may fail here for ENOMEM, especially in PageProtect plan.
        // See: https://man7.org/linux/man-pages/man2/mprotect.2.html#ERRORS
        // > Changing the protection of a memory region would result in
        // > the total number of mappings with distinct attributes
        // > (e.g., read versus read/write protection) exceeding the
        // > allowed maximum.
        assert!(self.protect_memory_on_release.is_some());
        // We are not using mmapper.protect(). mmapper.protect() protects the whole chunk and
        // may protect memory that is still in use.
        if let Err(e) = memory::mprotect(start, conversions::pages_to_bytes(pages)) {
            panic!(
                "Failed at protecting memory (starting at {}): {:?}",
                start, e
            );
        }
    }

    /// Unprotect the memory
    fn munprotect(&self, start: Address, pages: usize) {
        assert!(self.protect_memory_on_release.is_some());
        if let Err(e) = memory::munprotect(
            start,
            conversions::pages_to_bytes(pages),
            self.protect_memory_on_release.unwrap(),
        ) {
            panic!(
                "Failed at unprotecting memory (starting at {}): {:?}",
                start, e
            );
        }
    }

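    /// Allocate exactly one chunk and return it as a [`PRAllocResult`] without calling
    /// `commit_pages`; the caller is expected to commit the pages separately.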
    pub(crate) fn allocate_one_chunk_no_commit(
        &self,
        space_descriptor: SpaceDescriptor,
    ) -> Result<PRAllocResult, PRAllocFail> {
        assert!(self.common.growable);
        // FIXME: We need a safe implementation
        let mut sync = self.sync.lock().unwrap();
        let page_offset =
            unsafe { self.allocate_contiguous_chunks(space_descriptor, PAGES_IN_CHUNK, &mut sync) };

        if page_offset == freelist::FAILURE {
            return Result::Err(PRAllocFail);
        } else {
            sync.pages_currently_on_freelist -= PAGES_IN_CHUNK;
            if page_offset > sync.highwater_mark {
                sync.highwater_mark = page_offset;
            }
        }

        let rtn = sync.start + conversions::pages_to_bytes(page_offset as _);
        Result::Ok(PRAllocResult {
            start: rtn,
            pages: PAGES_IN_CHUNK,
            new_chunk: true,
        })
    }

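    /// Grow the space by the number of chunks required for `pages`, put the pages of the new
    /// chunks on the free list, and then retry the allocation of `pages`.  Returns the page
    /// offset of the allocation, or `freelist::FAILURE` if no chunks could be obtained.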
    unsafe fn allocate_contiguous_chunks(
        &self,
        space_descriptor: SpaceDescriptor,
        pages: usize,
        sync: &mut MutexGuard<FreeListPageResourceSync>,
    ) -> i32 {
        let mut rtn = freelist::FAILURE;
        let required_chunks = crate::policy::space::required_chunks(pages);
        let region = self.common.grow_discontiguous_space(
            space_descriptor,
            required_chunks,
            Some(sync.free_list.as_mut()),
        );

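        // A zero `region` means the request for more chunks could not be satisfied.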
        if !region.is_zero() {
            let region_start = conversions::bytes_to_pages_up(region - sync.start);
            let region_end = region_start + (required_chunks * PAGES_IN_CHUNK) - 1;
            sync.free_list.set_uncoalescable(region_start as _);
            sync.free_list.set_uncoalescable(region_end as i32 + 1);
            for p in (region_start..region_end).step_by(PAGES_IN_CHUNK) {
                if p != region_start {
                    sync.free_list.clear_uncoalescable(p as _);
                }
                let liberated = sync.free_list.free(p as _, true); // add chunk to our free list
                debug_assert!(liberated as usize == PAGES_IN_CHUNK + (p - region_start));
                sync.pages_currently_on_freelist += PAGES_IN_CHUNK;
            }
            rtn = sync.free_list.alloc(pages as _); // re-do the request which triggered this call
        }

        rtn
    }

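    /// Remove every page of the contiguous chunk region starting at `chunk` from the free list,
    /// then return the underlying address range for reuse.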
    unsafe fn free_contiguous_chunk(&self, chunk: Address, sync: &mut FreeListPageResourceSync) {
        let num_chunks = self.vm_map().get_contiguous_region_chunks(chunk);
        /* nail down all pages associated with the chunk, so they are no longer on our free list */
        let mut chunk_start = conversions::bytes_to_pages_up(chunk - sync.start);
        let chunk_end = chunk_start + (num_chunks * PAGES_IN_CHUNK);
        while chunk_start < chunk_end {
            sync.free_list.set_uncoalescable(chunk_start as _);
            let tmp = sync
                .free_list
                .alloc_from_unit(PAGES_IN_CHUNK as _, chunk_start as _)
                as usize; // then alloc the entire chunk
            debug_assert!(tmp == chunk_start);
            chunk_start += PAGES_IN_CHUNK;
            sync.pages_currently_on_freelist -= PAGES_IN_CHUNK;
        }
        /* now return the address space associated with the chunk for global reuse */

        self.common.release_discontiguous_chunks(chunk);
    }

    /// Release pages previously allocated by `alloc_pages`.
    ///
    /// Warning: This method acquires the mutex `self.sync`.  If multiple threads release pages
    /// concurrently, the lock contention will become a performance bottleneck.  This is especially
    /// problematic for plans that sweep objects in bulk in the `Release` stage.  Spaces except the
    /// large object space are recommended to use [`BlockPageResource`] whenever possible.
    ///
    /// [`BlockPageResource`]: crate::util::heap::blockpageresource::BlockPageResource
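    ///
    /// A minimal usage sketch (hypothetical; assumes `pr` is this page resource and `result` was
    /// returned by a successful `alloc_pages` call):
    ///
    /// ```ignore
    /// // Return the pages starting at `result.start` to the free list.
    /// pr.release_pages(result.start);
    /// ```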
    pub fn release_pages(&self, first: Address) {
        debug_assert!(conversions::is_page_aligned(first));
        let mut sync = self.sync.lock().unwrap();
        let page_offset = conversions::bytes_to_pages_up(first - sync.start);
        let pages = sync.free_list.size(page_offset as _);
        // if (VM.config.ZERO_PAGES_ON_RELEASE)
        //     VM.memory.zero(false, first, Conversions.pagesToBytes(pages));
        debug_assert!(pages as usize <= self.common.accounting.get_committed_pages());

        if self.protect_memory_on_release.is_some() {
            self.mprotect(first, pages as _);
        }

        self.common.accounting.release(pages as _);
        let freed = sync.free_list.free(page_offset as _, true);
        sync.pages_currently_on_freelist += pages as usize;
        if !self.common.contiguous {
            // only discontiguous spaces use chunks
            self.release_free_chunks(first, freed as _, &mut sync);
        }
    }

    fn release_free_chunks(
        &self,
        freed_page: Address,
        pages_freed: usize,
        sync: &mut FreeListPageResourceSync,
    ) {
        let page_offset = conversions::bytes_to_pages_up(freed_page - sync.start);

        // The freed run may span one or more whole chunks.
        if pages_freed % PAGES_IN_CHUNK == 0 {
            // A multiple of PAGES_IN_CHUNK is necessary, but not sufficient, for whole chunks to be free.
            /* grow a region of chunks, starting with the chunk containing the freed page */
            let mut region_start = page_offset & !(PAGES_IN_CHUNK - 1);
            let mut next_region_start = region_start + PAGES_IN_CHUNK;
            /* now try to grow (end point pages are marked as non-coalescing) */
            while sync.free_list.is_coalescable(region_start as _) {
                // region_start is guaranteed to be positive. Otherwise this line will fail due to subtraction overflow.
                region_start -= PAGES_IN_CHUNK;
            }
            while next_region_start < freelist::MAX_UNITS as usize
                && sync.free_list.is_coalescable(next_region_start as _)
            {
                next_region_start += PAGES_IN_CHUNK;
            }
            debug_assert!(next_region_start < freelist::MAX_UNITS as usize);
            if pages_freed == next_region_start - region_start {
                let start = sync.start;
                unsafe {
                    self.free_contiguous_chunk(
                        start + conversions::pages_to_bytes(region_start),
                        sync,
                    );
                }
            }
        }
    }
}