// mmtk/util/raw_memory_freelist.rs

1use super::freelist::*;
2use crate::util::address::Address;
3use crate::util::constants::*;
4use crate::util::conversions;
5use crate::util::os::*;
6
/** log2 of the number of bits used by a free list entry (two entries per unit) */
const LOG_ENTRY_BITS: usize = i32::BITS.ilog2() as _;

/** log2 of the number of bytes used by a free list entry (two entries per unit) */
const LOG_BYTES_IN_ENTRY: usize = LOG_ENTRY_BITS - (LOG_BITS_IN_BYTE as usize);

/** log2 of the number of bytes used by a free list unit (each unit holds two `i32` entries) */
const LOG_BYTES_IN_UNIT: usize = LOG_BYTES_IN_ENTRY + 1;
15
/// A `FreeList` whose entries live in a raw, lazily memory-mapped region,
/// allowing the list to grow in-place (block by block) without reallocation.
#[derive(Debug)]
pub struct RawMemoryFreeList {
    /// Index of the head sentinel of the default list.
    pub head: i32,
    /// Number of list heads (sentinels) this free list was configured with.
    pub heads: i32,
    /// Start of the backing virtual address range; the entry slice begins here.
    base: Address,
    /// Exclusive upper bound of the address range the list may grow into.
    limit: Address,
    /// Current end of the mapped memory; `base..high_water` is mapped.
    high_water: Address,
    /// Maximum number of units this list may ever hold.
    max_units: i32,
    /// Granularity (in units) of the regions the free range is carved into
    /// when the list grows (see `grow_list_by_blocks`).
    grain: i32,
    /// Number of units the list currently covers.
    current_units: i32,
    /// Number of pages mapped per growth block.
    pages_per_block: i32,
    /// Strategy passed to the OS mmap call when growing.
    strategy: MmapStrategy,
    /// View of the mapped entries; kept in sync with `base..high_water`.
    slice: &'static mut [i32],
}
30
31impl FreeList for RawMemoryFreeList {
32    fn head(&self) -> i32 {
33        self.head
34    }
35    fn heads(&self) -> i32 {
36        self.heads
37    }
38    fn get_entry(&self, index: i32) -> i32 {
39        #[cfg(debug_assertions)]
40        {
41            let len = (self.high_water - self.base) >> LOG_BYTES_IN_ENTRY;
42            debug_assert_eq!(
43                len,
44                self.slice.len(),
45                "Length does not match.  \
46                high_water: {h}, base: {b}, computed len: {len}, \
47                slice len: {sl}",
48                h = self.high_water,
49                b = self.base,
50                sl = self.slice.len()
51            );
52        }
53        self.slice[index as usize]
54    }
55    fn set_entry(&mut self, index: i32, value: i32) {
56        #[cfg(debug_assertions)]
57        {
58            let len = (self.high_water - self.base) >> LOG_BYTES_IN_ENTRY;
59            debug_assert_eq!(
60                len,
61                self.slice.len(),
62                "Length does not match.  \
63                high_water: {h}, base: {b}, computed len: {len}, \
64                slice len: {sl}",
65                h = self.high_water,
66                b = self.base,
67                sl = self.slice.len()
68            );
69        }
70        self.slice[index as usize] = value;
71    }
72    fn alloc(&mut self, size: i32) -> i32 {
73        if self.current_units == 0 {
74            return FAILURE;
75        }
76        let mut unit = self.head();
77        let mut s = 0;
78        while ({
79            unit = self.get_next(unit);
80            unit != self.head()
81        }) && ({
82            s = self.get_size(unit);
83            s < size
84        }) {}
85        if unit == self.head() {
86            FAILURE
87        } else {
88            self.__alloc(size, unit, s)
89        }
90    }
91}
92
93impl RawMemoryFreeList {
94    fn units_per_block(&self) -> i32 {
95        (conversions::pages_to_bytes(self.pages_per_block as _) >> LOG_BYTES_IN_UNIT) as _
96    }
97    fn units_in_first_block(&self) -> i32 {
98        self.units_per_block() - self.heads - 1
99    }
100    pub fn default_block_size(units: i32, heads: i32) -> i32 {
101        usize::min(Self::size_in_pages(units, heads) as _, 16) as _
102    }
103    pub fn size_in_pages(units: i32, heads: i32) -> i32 {
104        let map_size = ((units + heads + 1) as usize) << LOG_BYTES_IN_UNIT;
105        conversions::bytes_to_pages_up(map_size as _) as _
106    }
107
108    pub fn new(
109        base: Address,
110        limit: Address,
111        pages_per_block: i32,
112        units: i32,
113        grain: i32,
114        heads: i32,
115        strategy: MmapStrategy,
116    ) -> Self {
117        debug_assert!(units <= MAX_UNITS && heads <= MAX_HEADS);
118        debug_assert!(
119            base + conversions::pages_to_bytes(Self::size_in_pages(units, heads) as _) <= limit
120        );
121        Self {
122            head: -1,
123            heads,
124            base,
125            limit,
126            high_water: base,
127            max_units: units,
128            grain,
129            current_units: 0,
130            pages_per_block,
131            strategy,
132            // SAFETY: when a RawMemoryFreelist is created, its address range is
133            // base..base, a zero-sized slice starting at base
134            slice: unsafe { std::slice::from_raw_parts_mut(base.to_mut_ptr::<i32>(), 0) },
135        }
136    }
137
138    fn current_capacity(&self) -> i32 {
139        let list_blocks = conversions::bytes_to_pages_up(self.high_water - self.base) as i32
140            / self.pages_per_block;
141        self.units_in_first_block() + (list_blocks - 1) * self.units_per_block()
142    }
143
144    pub fn grow_freelist(&mut self, units: i32) -> bool {
145        let required_units = units + self.current_units;
146        if required_units > self.max_units {
147            return false;
148        }
149        let blocks = if required_units > self.current_capacity() {
150            let units_requested = required_units - self.current_capacity();
151            (units_requested + self.units_per_block() - 1) / self.units_per_block()
152        } else {
153            0
154        };
155        self.grow_list_by_blocks(blocks, required_units);
156        true
157    }
158    fn grow_list_by_blocks(&mut self, blocks: i32, new_max: i32) {
159        debug_assert!(
160            (new_max <= self.grain) || (((new_max / self.grain) * self.grain) == new_max)
161        );
162
163        if blocks > 0 {
164            // Allocate more VM from the OS
165            self.raise_high_water(blocks);
166        }
167
168        let len = (self.high_water - self.base) >> LOG_BYTES_IN_ENTRY;
169        // SAFETY: The memory is mapped and valid after `raise_high_water`.
170        self.slice = unsafe { std::slice::from_raw_parts_mut(self.base.to_mut_ptr::<i32>(), len) };
171
172        let old_max = self.current_units;
173        assert!(
174            new_max <= self.current_capacity(),
175            "blocks and new max are inconsistent: need more blocks for the requested capacity"
176        );
177        assert!(
178            new_max <= self.max_units,
179            "Requested list to grow larger than the configured maximum"
180        );
181        self.current_units = new_max;
182
183        if old_max == 0 {
184            // First allocation of capacity: initialize the sentinels.
185            for i in 1..=self.heads {
186                self.set_sentinel(-i);
187            }
188        } else {
189            // Turn the old top-of-heap sentinel into a single used block
190            self.set_size(old_max, 1);
191        }
192
193        if new_max == 0 {
194            return;
195        }
196
197        // Set a sentinel at the top of the new range
198        self.set_sentinel(new_max);
199
200        let mut cursor = new_max;
201
202        /* A series of grain size regions in the middle */
203        let grain = i32::min(self.grain, new_max - old_max);
204        cursor -= grain;
205        while cursor >= old_max {
206            self.set_size(cursor, grain);
207            self.add_to_free(cursor);
208            cursor -= grain;
209        }
210    }
211
212    fn raise_high_water(&mut self, blocks: i32) {
213        let mut grow_extent = conversions::pages_to_bytes((self.pages_per_block * blocks) as _);
214        assert_ne!(
215            self.high_water, self.limit,
216            "Attempt to grow FreeList beyond limit"
217        );
218        if self.high_water + grow_extent > self.limit {
219            grow_extent = self.high_water - self.limit;
220        }
221        self.mmap(self.high_water, grow_extent);
222        self.high_water += grow_extent;
223    }
224
225    fn mmap(&self, start: Address, bytes: usize) {
226        let res = OS::dzmmap(
227            start,
228            bytes,
229            self.strategy,
230            &MmapAnnotation::Misc {
231                name: "RawMemoryFreeList",
232            },
233        );
234        assert!(res.is_ok(), "Can't get more space with mmap()");
235    }
236    pub fn get_limit(&self) -> Address {
237        self.limit
238    }
239}
240
241/**
242 * See documentation of `mod tests` below for the necessity of `impl Drop`.
243 */
244#[cfg(test)]
245impl Drop for RawMemoryFreeList {
246    fn drop(&mut self) {
247        let len = self.high_water - self.base;
248        if len != 0 {
249            let _ = OS::munmap(self.base, len);
250        }
251    }
252}
253
254/**
255 * The initialization of `RawMemoryFreeList` involves memory-mapping a fixed range of virtual address.
256 *
257 * This raises an implicit assumption that a test process can only have one `RawMemoryFreeList` instance at a time unless each instance uses different fixed address ranges.
258 *
259 * We use a single fixed address range for all the following tests. So the tests cannot be executed in parallel. Which means:
260 *
261 * 1. Each test should hold a global mutex to prevent parallel execution.
262 * 2. `RawMemoryFreeList` should implement `Drop` trait to unmap the memory properly at the end of each test.
263 */
264#[cfg(test)]
265mod tests {
266    use super::FreeList;
267    use super::*;
268    use std::sync::{Mutex, MutexGuard};
269
270    const TOP_SENTINEL: i32 = -1;
271    const FIRST_UNIT: i32 = 0;
272
273    lazy_static! {
274        /**
275         * See documentation of `mod tests` above for for the necessity of this mutex.
276         */
277        static ref MUTEX: Mutex<()> = Mutex::new(());
278    }
279
280    fn new_raw_memory_freelist<'a>(
281        list_size: usize,
282        grain: i32,
283    ) -> (MutexGuard<'a, ()>, RawMemoryFreeList, i32, i32, i32) {
284        /*
285         * Note: The mutex could be poisoned!
286         * Test `free_list_access_out_of_bounds` below is expected to panic and poison the mutex.
287         * So we need to manually recover the lock here, if it is poisoned.
288         *
289         * See documentation of `mod tests` above for more details.
290         */
291        let guard = match MUTEX.lock() {
292            Ok(guard) => guard,
293            Err(poisoned) => poisoned.into_inner(),
294        };
295        let start = crate::util::test_util::RAW_MEMORY_FREELIST_TEST_REGION.start;
296        let extent = BYTES_IN_PAGE;
297        let pages_per_block = RawMemoryFreeList::default_block_size(list_size as _, 1);
298        assert_eq!(pages_per_block, 1);
299        let mut l = RawMemoryFreeList::new(
300            start,
301            start + extent,
302            pages_per_block,
303            list_size as _,
304            grain,
305            1,
306            MmapStrategy::TEST,
307        );
308        // Grow the free-list to do the actual memory-mapping.
309        l.grow_freelist(list_size as _);
310        let last_unit = list_size as i32 - grain;
311        let bottom_sentinel = list_size as i32;
312        (guard, l, list_size as _, last_unit, bottom_sentinel)
313    }
314
315    #[test]
316    #[allow(clippy::cognitive_complexity)] // extensive checks, and it doesn't matter for tests
317    fn new_free_list_grain1() {
318        let (_guard, l, _, last_unit, bottom_sentinel) = new_raw_memory_freelist(5, 1);
319        assert_eq!(l.head(), TOP_SENTINEL);
320
321        assert_eq!(l.get_prev(TOP_SENTINEL), last_unit);
322        assert_eq!(l.get_next(TOP_SENTINEL), FIRST_UNIT);
323
324        assert_eq!(l.get_size(FIRST_UNIT), 1);
325        assert_eq!(l.get_left(FIRST_UNIT), -1);
326        assert_eq!(l.get_prev(FIRST_UNIT), -1);
327        assert_eq!(l.get_right(FIRST_UNIT), 1);
328        assert_eq!(l.get_next(FIRST_UNIT), 1);
329        assert!(l.is_free(FIRST_UNIT));
330        assert!(l.is_coalescable(FIRST_UNIT));
331        assert!(!l.is_multi(FIRST_UNIT));
332
333        assert_eq!(l.get_size(1), 1);
334        assert_eq!(l.get_left(1), 0);
335        assert_eq!(l.get_prev(1), 0);
336        assert_eq!(l.get_right(1), 2);
337        assert_eq!(l.get_next(1), 2);
338        assert!(l.is_free(1));
339        assert!(l.is_coalescable(1));
340        assert!(!l.is_multi(1));
341
342        assert_eq!(l.get_size(last_unit), 1);
343        assert_eq!(l.get_left(last_unit), last_unit - 1);
344        assert_eq!(l.get_prev(last_unit), last_unit - 1);
345        assert_eq!(l.get_right(last_unit), bottom_sentinel);
346        assert_eq!(l.get_next(last_unit), -1);
347        assert!(l.is_free(last_unit));
348        assert!(l.is_coalescable(last_unit));
349        assert!(!l.is_multi(last_unit));
350
351        assert_eq!(l.get_prev(bottom_sentinel), bottom_sentinel);
352        assert_eq!(l.get_next(bottom_sentinel), bottom_sentinel);
353    }
354
355    #[test]
356    #[allow(clippy::cognitive_complexity)] // extensive checks, and it doesn't matter for tests
357    fn new_free_list_grain2() {
358        let (_guard, l, _, last_unit, bottom_sentinel) = new_raw_memory_freelist(6, 2);
359        assert_eq!(l.head(), TOP_SENTINEL);
360
361        assert_eq!(l.get_prev(TOP_SENTINEL), last_unit);
362        assert_eq!(l.get_next(TOP_SENTINEL), FIRST_UNIT);
363
364        assert_eq!(l.get_size(FIRST_UNIT), 2);
365        assert_eq!(l.get_left(FIRST_UNIT), -1);
366        assert_eq!(l.get_prev(FIRST_UNIT), -1);
367        assert_eq!(l.get_right(FIRST_UNIT), 2);
368        assert_eq!(l.get_next(FIRST_UNIT), 2);
369        assert!(l.is_free(FIRST_UNIT));
370        assert!(l.is_coalescable(FIRST_UNIT));
371        assert!(l.is_multi(FIRST_UNIT));
372
373        assert_eq!(l.get_size(2), 2);
374        assert_eq!(l.get_left(2), 0);
375        assert_eq!(l.get_prev(2), 0);
376        assert_eq!(l.get_right(2), 4);
377        assert_eq!(l.get_next(2), 4);
378        assert!(l.is_free(2));
379        assert!(l.is_coalescable(2));
380        assert!(l.is_multi(2));
381
382        assert_eq!(l.get_size(last_unit), 2);
383        assert_eq!(l.get_left(last_unit), last_unit - 2);
384        assert_eq!(l.get_prev(last_unit), last_unit - 2);
385        assert_eq!(l.get_right(last_unit), bottom_sentinel);
386        assert_eq!(l.get_next(last_unit), -1);
387        assert!(l.is_free(last_unit));
388        assert!(l.is_coalescable(last_unit));
389        assert!(l.is_multi(last_unit));
390
391        assert_eq!(l.get_prev(bottom_sentinel), bottom_sentinel);
392        assert_eq!(l.get_next(bottom_sentinel), bottom_sentinel);
393    }
394
395    #[test]
396    #[should_panic]
397    fn free_list_access_out_of_bounds() {
398        let (_guard, l, _, _, _) = new_raw_memory_freelist(5, 1);
399        l.get_size(4096);
400        // `_guard` should be dropped during stack unwinding
401    }
402
403    #[test]
404    fn alloc_fit() {
405        let (_guard, mut l, _, last_unit, _) = new_raw_memory_freelist(6, 2);
406        let result = l.alloc(2);
407        assert_eq!(result, 0);
408
409        const NEXT: i32 = 2;
410
411        assert_eq!(l.get_prev(TOP_SENTINEL), last_unit);
412        assert_eq!(l.get_next(TOP_SENTINEL), NEXT);
413
414        assert_eq!(l.get_size(FIRST_UNIT), 2);
415        assert_eq!(l.get_left(FIRST_UNIT), -1);
416        assert_eq!(l.get_prev(FIRST_UNIT), -1);
417        assert_eq!(l.get_right(FIRST_UNIT), 2);
418        assert_eq!(l.get_next(FIRST_UNIT), 2);
419        assert!(!l.is_free(FIRST_UNIT)); // not free
420        assert!(l.is_coalescable(FIRST_UNIT));
421        assert!(l.is_multi(FIRST_UNIT));
422
423        assert_eq!(l.get_size(2), 2);
424        assert_eq!(l.get_left(2), 0);
425        assert_eq!(l.get_prev(2), -1); // no prev now
426        assert_eq!(l.get_right(2), 4);
427        assert_eq!(l.get_next(2), 4);
428        assert!(l.is_free(2));
429        assert!(l.is_coalescable(2));
430        assert!(l.is_multi(2));
431    }
432
433    #[test]
434    #[allow(clippy::cognitive_complexity)] // extensive checks, and it doesn't matter for tests
435    fn alloc_split() {
436        let (_guard, mut l, _, last_unit, _) = new_raw_memory_freelist(6, 2);
437        let result = l.alloc(1);
438        assert_eq!(result, 0);
439
440        const NEXT: i32 = 1;
441        assert_eq!(l.get_prev(TOP_SENTINEL), last_unit);
442        assert_eq!(l.get_next(TOP_SENTINEL), NEXT);
443
444        assert_eq!(l.get_size(FIRST_UNIT), 1);
445        assert_eq!(l.get_left(FIRST_UNIT), -1);
446        assert_eq!(l.get_prev(FIRST_UNIT), 1); // prev is 1 now
447        assert_eq!(l.get_right(FIRST_UNIT), 1); // right is 1 now
448        assert_eq!(l.get_next(FIRST_UNIT), 2);
449        assert!(!l.is_free(FIRST_UNIT)); // not free
450        assert!(l.is_coalescable(FIRST_UNIT));
451        assert!(!l.is_multi(FIRST_UNIT)); // not multi
452
453        assert_eq!(l.get_size(1), 1);
454        assert_eq!(l.get_left(1), 0); // unit1's left is 0
455        assert_eq!(l.get_prev(1), -1); // unit1's prev is -1 (no prev, unit1 is removed form the list)
456        assert_eq!(l.get_right(1), 2);
457        assert_eq!(l.get_next(1), 2);
458        assert!(l.is_free(1)); // not free
459        assert!(l.is_coalescable(1));
460        assert!(!l.is_multi(1)); // not multi
461
462        assert_eq!(l.get_size(2), 2);
463        assert_eq!(l.get_left(2), 1);
464        assert_eq!(l.get_prev(2), 1); // uni2's prev is 1 now
465        assert_eq!(l.get_right(2), 4);
466        assert_eq!(l.get_next(2), 4);
467        assert!(l.is_free(2));
468        assert!(l.is_coalescable(2));
469        assert!(l.is_multi(2));
470    }
471
472    #[test]
473    fn alloc_split_twice() {
474        let (_guard, mut l, _, _, _) = new_raw_memory_freelist(6, 2);
475        // Alloc size 1 and cause split
476        let res1 = l.alloc(1);
477        assert_eq!(res1, 0);
478        // Alloc size 1
479        let res2 = l.alloc(1);
480        assert_eq!(res2, 1);
481
482        // Next available unit has no prev now
483        assert_eq!(l.get_prev(2), -1);
484    }
485
486    #[test]
487    fn alloc_skip() {
488        let (_guard, mut l, _, _, _) = new_raw_memory_freelist(6, 2);
489        // Alloc size 1 and cause split
490        let res1 = l.alloc(1);
491        assert_eq!(res1, 0);
492        // Alloc size 2, we skip unit1
493        let res2 = l.alloc(2);
494        assert_eq!(res2, 2);
495
496        // unit1 is still free, and linked with unit4
497        assert!(l.is_free(1));
498        assert_eq!(l.get_next(1), 4);
499        assert_eq!(l.get_prev(4), 1);
500    }
501
502    #[test]
503    fn alloc_exhaust() {
504        let (_guard, mut l, _, _, _) = new_raw_memory_freelist(6, 2);
505        let res1 = l.alloc(2);
506        assert_eq!(res1, 0);
507        let res2 = l.alloc(2);
508        assert_eq!(res2, 2);
509        let res3 = l.alloc(2);
510        assert_eq!(res3, 4);
511        let res4 = l.alloc(2);
512        assert_eq!(res4, FAILURE);
513    }
514
515    #[test]
516    fn free_unit() {
517        let (_guard, mut l, _, _, _) = new_raw_memory_freelist(6, 2);
518        let res1 = l.alloc(2);
519        assert_eq!(res1, 0);
520        let res2 = l.alloc(2);
521        assert_eq!(res2, 2);
522
523        // Unit4 is still free, but has no prev
524        assert_eq!(l.get_prev(4), -1);
525
526        // Free Unit2
527        let freed = l.free(res2, false);
528        assert_eq!(freed, res2);
529        assert!(l.is_free(res2));
530    }
531
532    #[test]
533    fn free_coalesce() {
534        let (_guard, mut l, _, _, _) = new_raw_memory_freelist(6, 2);
535        let res1 = l.alloc(2);
536        assert_eq!(res1, 0);
537        let res2 = l.alloc(2);
538        assert_eq!(res2, 2);
539
540        // Free Unit2. It will coalesce with Unit4
541        let coalesced_size = l.free(res2, true);
542        assert_eq!(coalesced_size, 4);
543    }
544
545    #[test]
546    fn free_cant_coalesce() {
547        let (_guard, mut l, _, _, _) = new_raw_memory_freelist(6, 2);
548        let res1 = l.alloc(2);
549        assert_eq!(res1, 0);
550        let res2 = l.alloc(2);
551        assert_eq!(res2, 2);
552        let res3 = l.alloc(1);
553        assert_eq!(res3, 4);
554
555        // Free Unit2. It cannot coalesce with Unit4
556        let coalesced_size = l.free(res2, true);
557        assert_eq!(coalesced_size, 2);
558    }
559
560    #[test]
561    fn free_realloc() {
562        let (_guard, mut l, _, _, _) = new_raw_memory_freelist(6, 2);
563        let res1 = l.alloc(2);
564        assert_eq!(res1, 0);
565        let res2 = l.alloc(2);
566        assert_eq!(res2, 2);
567
568        // Unit4 is still free, but has no prev
569        assert_eq!(l.get_prev(4), -1);
570
571        // Free Unit2
572        let freed = l.free(res2, false);
573        assert_eq!(freed, res2);
574        assert!(l.is_free(res2));
575
576        // Alloc again
577        let res3 = l.alloc(2);
578        assert_eq!(res3, 2);
579        assert!(!l.is_free(res3));
580
581        let res4 = l.alloc(1);
582        assert_eq!(res4, 4);
583    }
584}