1use super::freelist::*;
2use crate::util::address::Address;
3use crate::util::constants::*;
4use crate::util::conversions;
5use crate::util::os::*;
6
/// Log2 of the bits in one free-list entry (an entry is one `i32`).
const LOG_ENTRY_BITS: usize = LOG_BITS_IN_INT as _;

/// Log2 of the bytes occupied by one free-list entry.
const LOG_BYTES_IN_ENTRY: usize = LOG_ENTRY_BITS - (LOG_BITS_IN_BYTE as usize);

/// Log2 of the bytes occupied by one unit. The `+ 1` means each unit takes
/// two entries in the table.
const LOG_BYTES_IN_UNIT: usize = LOG_BYTES_IN_ENTRY + 1;
15
/// A doubly-linked free list whose table lives in raw mmapped memory that is
/// grown on demand (see `grow_freelist`), rather than in a heap-allocated Vec.
#[derive(Debug)]
pub struct RawMemoryFreeList {
    /// Index of the list head. Initialized to -1; negative indices are used
    /// for the head sentinels (see `set_sentinel(-i)` in growth).
    pub head: i32,
    /// Number of list heads this list was configured with.
    pub heads: i32,
    /// Start address of the backing memory region (table entry 0).
    base: Address,
    /// Hard upper bound of the backing region; mapping never passes this.
    limit: Address,
    /// End of the currently mmapped portion; starts at `base` (nothing mapped).
    high_water: Address,
    /// Maximum number of units the list may ever grow to.
    max_units: i32,
    /// Allocation granularity: free ranges are threaded in `grain`-sized chunks.
    grain: i32,
    /// Number of units currently in the list.
    current_units: i32,
    /// Number of pages mapped per growth block.
    pages_per_block: i32,
    /// Strategy passed to `OS::dzmmap` when mapping more memory.
    strategy: MmapStrategy,
}
29
30impl FreeList for RawMemoryFreeList {
31 fn head(&self) -> i32 {
32 self.head
33 }
34 fn heads(&self) -> i32 {
35 self.heads
36 }
37 fn get_entry(&self, index: i32) -> i32 {
38 let offset = (index << LOG_BYTES_IN_ENTRY) as usize;
39 debug_assert!(self.base + offset >= self.base && self.base + offset < self.high_water);
40 unsafe { (self.base + offset).load() }
41 }
42 fn set_entry(&mut self, index: i32, value: i32) {
43 let offset = (index << LOG_BYTES_IN_ENTRY) as usize;
44 debug_assert!(
45 self.base + offset >= self.base && self.base + offset < self.high_water,
46 "base={:?} offset={:?} index={:?} high_water={:?}",
47 self.base,
48 offset,
49 self.base + offset,
50 self.high_water
51 );
52 unsafe { (self.base + offset).store(value) }
53 }
54 fn alloc(&mut self, size: i32) -> i32 {
55 if self.current_units == 0 {
56 return FAILURE;
57 }
58 let mut unit = self.head();
59 let mut s = 0;
60 while ({
61 unit = self.get_next(unit);
62 unit != self.head()
63 }) && ({
64 s = self.get_size(unit);
65 s < size
66 }) {}
67 if unit == self.head() {
68 FAILURE
69 } else {
70 self.__alloc(size, unit, s)
71 }
72 }
73}
74
75impl RawMemoryFreeList {
76 fn units_per_block(&self) -> i32 {
77 (conversions::pages_to_bytes(self.pages_per_block as _) >> LOG_BYTES_IN_UNIT) as _
78 }
79 fn units_in_first_block(&self) -> i32 {
80 self.units_per_block() - self.heads - 1
81 }
82 pub fn default_block_size(units: i32, heads: i32) -> i32 {
83 usize::min(Self::size_in_pages(units, heads) as _, 16) as _
84 }
85 pub fn size_in_pages(units: i32, heads: i32) -> i32 {
86 let map_size = ((units + heads + 1) as usize) << LOG_BYTES_IN_UNIT;
87 conversions::bytes_to_pages_up(map_size as _) as _
88 }
89
90 pub fn new(
91 base: Address,
92 limit: Address,
93 pages_per_block: i32,
94 units: i32,
95 grain: i32,
96 heads: i32,
97 strategy: MmapStrategy,
98 ) -> Self {
99 debug_assert!(units <= MAX_UNITS && heads <= MAX_HEADS);
100 debug_assert!(
101 base + conversions::pages_to_bytes(Self::size_in_pages(units, heads) as _) <= limit
102 );
103 Self {
104 head: -1,
105 heads,
106 base,
107 limit,
108 high_water: base,
109 max_units: units,
110 grain,
111 current_units: 0,
112 pages_per_block,
113 strategy,
114 }
115 }
116
117 fn current_capacity(&self) -> i32 {
118 let list_blocks = conversions::bytes_to_pages_up(self.high_water - self.base) as i32
119 / self.pages_per_block;
120 self.units_in_first_block() + (list_blocks - 1) * self.units_per_block()
121 }
122
123 pub fn grow_freelist(&mut self, units: i32) -> bool {
124 let required_units = units + self.current_units;
125 if required_units > self.max_units {
126 return false;
127 }
128 let blocks = if required_units > self.current_capacity() {
129 let units_requested = required_units - self.current_capacity();
130 (units_requested + self.units_per_block() - 1) / self.units_per_block()
131 } else {
132 0
133 };
134 self.grow_list_by_blocks(blocks, required_units);
135 true
136 }
137 fn grow_list_by_blocks(&mut self, blocks: i32, new_max: i32) {
138 debug_assert!(
139 (new_max <= self.grain) || (((new_max / self.grain) * self.grain) == new_max)
140 );
141
142 if blocks > 0 {
143 self.raise_high_water(blocks);
145 }
146
147 let old_max = self.current_units;
148 assert!(
149 new_max <= self.current_capacity(),
150 "blocks and new max are inconsistent: need more blocks for the requested capacity"
151 );
152 assert!(
153 new_max <= self.max_units,
154 "Requested list to grow larger than the configured maximum"
155 );
156 self.current_units = new_max;
157
158 if old_max == 0 {
159 for i in 1..=self.heads {
161 self.set_sentinel(-i);
162 }
163 } else {
164 self.set_size(old_max, 1);
166 }
167
168 if new_max == 0 {
169 return;
170 }
171
172 self.set_sentinel(new_max);
174
175 let mut cursor = new_max;
176
177 let grain = i32::min(self.grain, new_max - old_max);
179 cursor -= grain;
180 while cursor >= old_max {
181 self.set_size(cursor, grain);
182 self.add_to_free(cursor);
183 cursor -= grain;
184 }
185 }
186
187 fn raise_high_water(&mut self, blocks: i32) {
188 let mut grow_extent = conversions::pages_to_bytes((self.pages_per_block * blocks) as _);
189 assert_ne!(
190 self.high_water, self.limit,
191 "Attempt to grow FreeList beyond limit"
192 );
193 if self.high_water + grow_extent > self.limit {
194 grow_extent = self.high_water - self.limit;
195 }
196 self.mmap(self.high_water, grow_extent);
197 self.high_water += grow_extent;
198 }
199
200 fn mmap(&self, start: Address, bytes: usize) {
201 let res = OS::dzmmap(
202 start,
203 bytes,
204 self.strategy,
205 &MmapAnnotation::Misc {
206 name: "RawMemoryFreeList",
207 },
208 );
209 assert!(res.is_ok(), "Can't get more space with mmap()");
210 }
211 pub fn get_limit(&self) -> Address {
212 self.limit
213 }
214}
215
// Test builds only: unmap the backing memory on drop so successive tests can
// reuse the same fixed test region.
#[cfg(test)]
impl Drop for RawMemoryFreeList {
    fn drop(&mut self) {
        // Only the pages below the high-water mark were ever mapped.
        let len = self.high_water - self.base;
        if len != 0 {
            // Best-effort: a failed munmap during drop is deliberately ignored.
            let _ = OS::munmap(self.base, len);
        }
    }
}
228
#[cfg(test)]
mod tests {
    use super::FreeList;
    use super::*;
    use std::sync::{Mutex, MutexGuard};

    // Index of the top sentinel and of the first real unit in every list.
    const TOP_SENTINEL: i32 = -1;
    const FIRST_UNIT: i32 = 0;

    lazy_static! {
        // All tests map the same fixed test region, so they must run serially.
        static ref MUTEX: Mutex<()> = Mutex::new(());
    }

    /// Build a freelist of `list_size` units with the given `grain` over the
    /// shared test region. Returns the serialization guard, the list, the
    /// list size, the index of the last free chunk, and the bottom sentinel.
    fn new_raw_memory_freelist<'a>(
        list_size: usize,
        grain: i32,
    ) -> (MutexGuard<'a, ()>, RawMemoryFreeList, i32, i32, i32) {
        // Recover the lock even if a previous test panicked while holding it.
        let guard = match MUTEX.lock() {
            Ok(guard) => guard,
            Err(poisoned) => poisoned.into_inner(),
        };
        let start = crate::util::test_util::RAW_MEMORY_FREELIST_TEST_REGION.start;
        let extent = BYTES_IN_PAGE;
        let pages_per_block = RawMemoryFreeList::default_block_size(list_size as _, 1);
        assert_eq!(pages_per_block, 1);
        let mut l = RawMemoryFreeList::new(
            start,
            start + extent,
            pages_per_block,
            list_size as _,
            grain,
            1,
            MmapStrategy::TEST,
        );
        l.grow_freelist(list_size as _);
        // The last free chunk starts one grain before the end of the list.
        let last_unit = list_size as i32 - grain;
        let bottom_sentinel = list_size as i32;
        (guard, l, list_size as _, last_unit, bottom_sentinel)
    }

    /// A freshly grown grain-1 list: every unit is a free, single-unit chunk
    /// threaded between the top and bottom sentinels.
    #[test]
    #[allow(clippy::cognitive_complexity)]
    fn new_free_list_grain1() {
        let (_guard, l, _, last_unit, bottom_sentinel) = new_raw_memory_freelist(5, 1);
        assert_eq!(l.head(), TOP_SENTINEL);

        assert_eq!(l.get_prev(TOP_SENTINEL), last_unit);
        assert_eq!(l.get_next(TOP_SENTINEL), FIRST_UNIT);

        assert_eq!(l.get_size(FIRST_UNIT), 1);
        assert_eq!(l.get_left(FIRST_UNIT), -1);
        assert_eq!(l.get_prev(FIRST_UNIT), -1);
        assert_eq!(l.get_right(FIRST_UNIT), 1);
        assert_eq!(l.get_next(FIRST_UNIT), 1);
        assert!(l.is_free(FIRST_UNIT));
        assert!(l.is_coalescable(FIRST_UNIT));
        assert!(!l.is_multi(FIRST_UNIT));

        assert_eq!(l.get_size(1), 1);
        assert_eq!(l.get_left(1), 0);
        assert_eq!(l.get_prev(1), 0);
        assert_eq!(l.get_right(1), 2);
        assert_eq!(l.get_next(1), 2);
        assert!(l.is_free(1));
        assert!(l.is_coalescable(1));
        assert!(!l.is_multi(1));

        assert_eq!(l.get_size(last_unit), 1);
        assert_eq!(l.get_left(last_unit), last_unit - 1);
        assert_eq!(l.get_prev(last_unit), last_unit - 1);
        assert_eq!(l.get_right(last_unit), bottom_sentinel);
        assert_eq!(l.get_next(last_unit), -1);
        assert!(l.is_free(last_unit));
        assert!(l.is_coalescable(last_unit));
        assert!(!l.is_multi(last_unit));

        // The bottom sentinel links only to itself.
        assert_eq!(l.get_prev(bottom_sentinel), bottom_sentinel);
        assert_eq!(l.get_next(bottom_sentinel), bottom_sentinel);
    }

    /// A freshly grown grain-2 list: units are threaded as free two-unit
    /// (multi) chunks.
    #[test]
    #[allow(clippy::cognitive_complexity)]
    fn new_free_list_grain2() {
        let (_guard, l, _, last_unit, bottom_sentinel) = new_raw_memory_freelist(6, 2);
        assert_eq!(l.head(), TOP_SENTINEL);

        assert_eq!(l.get_prev(TOP_SENTINEL), last_unit);
        assert_eq!(l.get_next(TOP_SENTINEL), FIRST_UNIT);

        assert_eq!(l.get_size(FIRST_UNIT), 2);
        assert_eq!(l.get_left(FIRST_UNIT), -1);
        assert_eq!(l.get_prev(FIRST_UNIT), -1);
        assert_eq!(l.get_right(FIRST_UNIT), 2);
        assert_eq!(l.get_next(FIRST_UNIT), 2);
        assert!(l.is_free(FIRST_UNIT));
        assert!(l.is_coalescable(FIRST_UNIT));
        assert!(l.is_multi(FIRST_UNIT));

        assert_eq!(l.get_size(2), 2);
        assert_eq!(l.get_left(2), 0);
        assert_eq!(l.get_prev(2), 0);
        assert_eq!(l.get_right(2), 4);
        assert_eq!(l.get_next(2), 4);
        assert!(l.is_free(2));
        assert!(l.is_coalescable(2));
        assert!(l.is_multi(2));

        assert_eq!(l.get_size(last_unit), 2);
        assert_eq!(l.get_left(last_unit), last_unit - 2);
        assert_eq!(l.get_prev(last_unit), last_unit - 2);
        assert_eq!(l.get_right(last_unit), bottom_sentinel);
        assert_eq!(l.get_next(last_unit), -1);
        assert!(l.is_free(last_unit));
        assert!(l.is_coalescable(last_unit));
        assert!(l.is_multi(last_unit));

        // The bottom sentinel links only to itself.
        assert_eq!(l.get_prev(bottom_sentinel), bottom_sentinel);
        assert_eq!(l.get_next(bottom_sentinel), bottom_sentinel);
    }

    /// Reading far past the mapped region trips the bounds debug-assertion
    /// in `get_entry`.
    #[test]
    #[should_panic]
    fn free_list_access_out_of_bounds() {
        let (_guard, l, _, _, _) = new_raw_memory_freelist(5, 1);
        l.get_size(4096);
    }

    /// Allocation whose size exactly matches the first free chunk.
    #[test]
    fn alloc_fit() {
        let (_guard, mut l, _, last_unit, _) = new_raw_memory_freelist(6, 2);
        let result = l.alloc(2);
        assert_eq!(result, 0);

        const NEXT: i32 = 2;

        assert_eq!(l.get_prev(TOP_SENTINEL), last_unit);
        assert_eq!(l.get_next(TOP_SENTINEL), NEXT);

        assert_eq!(l.get_size(FIRST_UNIT), 2);
        assert_eq!(l.get_left(FIRST_UNIT), -1);
        assert_eq!(l.get_prev(FIRST_UNIT), -1);
        assert_eq!(l.get_right(FIRST_UNIT), 2);
        assert_eq!(l.get_next(FIRST_UNIT), 2);
        // The allocated chunk is no longer free.
        assert!(!l.is_free(FIRST_UNIT));
        assert!(l.is_coalescable(FIRST_UNIT));
        assert!(l.is_multi(FIRST_UNIT));

        assert_eq!(l.get_size(2), 2);
        assert_eq!(l.get_left(2), 0);
        // Unit 2 is now the first free chunk, so its prev is the sentinel.
        assert_eq!(l.get_prev(2), -1);
        assert_eq!(l.get_right(2), 4);
        assert_eq!(l.get_next(2), 4);
        assert!(l.is_free(2));
        assert!(l.is_coalescable(2));
        assert!(l.is_multi(2));
    }

    /// Allocation smaller than the grain splits the first free chunk.
    #[test]
    #[allow(clippy::cognitive_complexity)]
    fn alloc_split() {
        let (_guard, mut l, _, last_unit, _) = new_raw_memory_freelist(6, 2);
        let result = l.alloc(1);
        assert_eq!(result, 0);

        const NEXT: i32 = 1;
        assert_eq!(l.get_prev(TOP_SENTINEL), last_unit);
        assert_eq!(l.get_next(TOP_SENTINEL), NEXT);

        // Unit 0: the allocated single-unit half of the split chunk.
        assert_eq!(l.get_size(FIRST_UNIT), 1);
        assert_eq!(l.get_left(FIRST_UNIT), -1);
        assert_eq!(l.get_prev(FIRST_UNIT), 1);
        assert_eq!(l.get_right(FIRST_UNIT), 1);
        assert_eq!(l.get_next(FIRST_UNIT), 2);
        assert!(!l.is_free(FIRST_UNIT));
        assert!(l.is_coalescable(FIRST_UNIT));
        assert!(!l.is_multi(FIRST_UNIT));

        // Unit 1: the remaining free single-unit half.
        assert_eq!(l.get_size(1), 1);
        assert_eq!(l.get_left(1), 0);
        assert_eq!(l.get_prev(1), -1);
        assert_eq!(l.get_right(1), 2);
        assert_eq!(l.get_next(1), 2);
        assert!(l.is_free(1));
        assert!(l.is_coalescable(1));
        assert!(!l.is_multi(1));

        // Unit 2: the next (untouched) two-unit free chunk.
        assert_eq!(l.get_size(2), 2);
        assert_eq!(l.get_left(2), 1);
        assert_eq!(l.get_prev(2), 1);
        assert_eq!(l.get_right(2), 4);
        assert_eq!(l.get_next(2), 4);
        assert!(l.is_free(2));
        assert!(l.is_coalescable(2));
        assert!(l.is_multi(2));
    }

    /// Splitting twice hands out both halves of the first chunk in order.
    #[test]
    fn alloc_split_twice() {
        let (_guard, mut l, _, _, _) = new_raw_memory_freelist(6, 2);
        let res1 = l.alloc(1);
        assert_eq!(res1, 0);
        let res2 = l.alloc(1);
        assert_eq!(res2, 1);

        // Unit 2 is now the first free chunk on the list.
        assert_eq!(l.get_prev(2), -1);
    }

    /// A request too large for the leftover split half skips over it.
    #[test]
    fn alloc_skip() {
        let (_guard, mut l, _, _, _) = new_raw_memory_freelist(6, 2);
        let res1 = l.alloc(1);
        assert_eq!(res1, 0);
        let res2 = l.alloc(2);
        assert_eq!(res2, 2);

        // Unit 1 stays free and now links directly to unit 4.
        assert!(l.is_free(1));
        assert_eq!(l.get_next(1), 4);
        assert_eq!(l.get_prev(4), 1);
    }

    /// Allocating every chunk succeeds; one more request reports FAILURE.
    #[test]
    fn alloc_exhaust() {
        let (_guard, mut l, _, _, _) = new_raw_memory_freelist(6, 2);
        let res1 = l.alloc(2);
        assert_eq!(res1, 0);
        let res2 = l.alloc(2);
        assert_eq!(res2, 2);
        let res3 = l.alloc(2);
        assert_eq!(res3, 4);
        let res4 = l.alloc(2);
        assert_eq!(res4, FAILURE);
    }

    /// Freeing without coalescing returns the freed unit and marks it free.
    #[test]
    fn free_unit() {
        let (_guard, mut l, _, _, _) = new_raw_memory_freelist(6, 2);
        let res1 = l.alloc(2);
        assert_eq!(res1, 0);
        let res2 = l.alloc(2);
        assert_eq!(res2, 2);

        assert_eq!(l.get_prev(4), -1);

        let freed = l.free(res2, false);
        assert_eq!(freed, res2);
        assert!(l.is_free(res2));
    }

    /// Freeing with coalescing merges the freed chunk with its free neighbor.
    #[test]
    fn free_coalesce() {
        let (_guard, mut l, _, _, _) = new_raw_memory_freelist(6, 2);
        let res1 = l.alloc(2);
        assert_eq!(res1, 0);
        let res2 = l.alloc(2);
        assert_eq!(res2, 2);

        let coalesced_size = l.free(res2, true);
        assert_eq!(coalesced_size, 4);
    }

    /// No coalescing happens when both neighbors are still allocated.
    #[test]
    fn free_cant_coalesce() {
        let (_guard, mut l, _, _, _) = new_raw_memory_freelist(6, 2);
        let res1 = l.alloc(2);
        assert_eq!(res1, 0);
        let res2 = l.alloc(2);
        assert_eq!(res2, 2);
        let res3 = l.alloc(1);
        assert_eq!(res3, 4);

        let coalesced_size = l.free(res2, true);
        assert_eq!(coalesced_size, 2);
    }

    /// A freed chunk can be handed out again by a later allocation.
    #[test]
    fn free_realloc() {
        let (_guard, mut l, _, _, _) = new_raw_memory_freelist(6, 2);
        let res1 = l.alloc(2);
        assert_eq!(res1, 0);
        let res2 = l.alloc(2);
        assert_eq!(res2, 2);

        assert_eq!(l.get_prev(4), -1);

        let freed = l.free(res2, false);
        assert_eq!(freed, res2);
        assert!(l.is_free(res2));

        // The freed chunk is reused first.
        let res3 = l.alloc(2);
        assert_eq!(res3, 2);
        assert!(!l.is_free(res3));

        let res4 = l.alloc(1);
        assert_eq!(res4, 4);
    }
}