mmtk/util/heap/layout/map64.rs

use super::map::CreateFreeListResult;
use super::map::VMMap;
use crate::util::constants::*;
use crate::util::conversions;
use crate::util::freelist::FreeList;
use crate::util::heap::layout::heap_parameters::*;
use crate::util::heap::layout::vm_layout::*;
use crate::util::heap::space_descriptor::SpaceDescriptor;
use crate::util::memory::MmapStrategy;
use crate::util::raw_memory_freelist::RawMemoryFreeList;
use crate::util::Address;
use std::cell::UnsafeCell;

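/// The fraction of a space that remains usable once room for the space's free-list metadata
/// has been set aside (on the order of 8 bytes of free-list entry data per 4096-byte page).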
const NON_MAP_FRACTION: f64 = 1.0 - 8.0 / 4096.0;

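/// A [`VMMap`] implementation for 64-bit address spaces.  Each space occupies its own
/// contiguous region of `2^log_space_extent` bytes, and chunks are allocated by bumping a
/// per-space high water mark rather than by managing a shared pool of discontiguous chunks.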
pub struct Map64 {
    inner: UnsafeCell<Map64Inner>,
}

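/// The mutable state of [`Map64`].  It is kept behind an [`UnsafeCell`] because it is only
/// mutated during single-threaded initialization, or by spaces that each touch a different index.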
struct Map64Inner {
    finalized: bool,
    descriptor_map: Vec<SpaceDescriptor>,
    base_address: Vec<Address>,
    high_water: Vec<Address>,
}

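// SAFETY: The inner state is only mutated during single-threaded initialization, or by spaces
// that each update a different index.  See `mut_self` and the comments in the methods below.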
unsafe impl Send for Map64 {}
unsafe impl Sync for Map64 {}

impl Map64 {
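    /// Create a `Map64` with every space's base address and high water mark initialized to
    /// the start of that space's region.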
    pub fn new() -> Self {
        let mut high_water = vec![Address::ZERO; MAX_SPACES];
        let mut base_address = vec![Address::ZERO; MAX_SPACES];

        for i in 0..MAX_SPACES {
            let base = unsafe { Address::from_usize(i << vm_layout().log_space_extent) };
            high_water[i] = base;
            base_address[i] = base;
        }

        let descriptor_map = vec![SpaceDescriptor::UNINITIALIZED; MAX_SPACES];

        Self {
            inner: UnsafeCell::new(Map64Inner {
                descriptor_map,
                high_water,
                base_address,
                finalized: false,
            }),
        }
    }
}

impl VMMap for Map64 {
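    /// Record `descriptor` as the descriptor of the space that starts at `start`.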
    fn insert(&self, start: Address, extent: usize, descriptor: SpaceDescriptor) {
        debug_assert!(Self::is_space_start(start));
        debug_assert!(extent <= vm_layout().space_size_64());
        // Each space will call this on exclusive address ranges. It is fine to mutate the descriptor map,
        // as each space will update different indices.
        let self_mut = unsafe { self.mut_self() };
        let index = Self::space_index(start).unwrap();
        self_mut.descriptor_map[index] = descriptor;
    }

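    /// Create a free list that covers one entire space, using one free-list unit per page.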
    fn create_freelist(&self, start: Address) -> CreateFreeListResult {
        let units = vm_layout().space_size_64() >> LOG_BYTES_IN_PAGE;
        self.create_parent_freelist(start, units, units as _)
    }

    fn create_parent_freelist(
        &self,
        start: Address,
        mut units: usize,
        grain: i32,
    ) -> CreateFreeListResult {
        debug_assert!(start.is_aligned_to(BYTES_IN_CHUNK));

        // This is only called when creating a page resource/space/plan/MMTk instance, which is single-threaded.
        let self_mut = unsafe { self.mut_self() };
        let index = Self::space_index(start).unwrap();

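        // Scale down the unit count to leave room for the free list's own storage at the
        // start of the space.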
        units = (units as f64 * NON_MAP_FRACTION) as _;
        let list_extent =
            conversions::pages_to_bytes(RawMemoryFreeList::size_in_pages(units as _, 1) as _);

        let heads = 1;
        let pages_per_block = RawMemoryFreeList::default_block_size(units as _, heads);
        let list = Box::new(RawMemoryFreeList::new(
            start,
            start + list_extent,
            pages_per_block,
            units as _,
            grain,
            heads,
            MmapStrategy::INTERNAL_MEMORY,
        ));

        /* Adjust the base address and high water mark to account for the chunks allocated for the map */
        let base = conversions::chunk_align_up(start + list_extent);

        self_mut.high_water[index] = base;
        self_mut.base_address[index] = base;

        let space_displacement = base - start;
        CreateFreeListResult {
            free_list: list,
            space_displacement,
        }
    }

    /// # Safety
    ///
    /// Caller must ensure that only one thread is calling this method.
    unsafe fn allocate_contiguous_chunks(
        &self,
        descriptor: SpaceDescriptor,
        chunks: usize,
        _head: Address,
        maybe_freelist: Option<&mut dyn FreeList>,
    ) -> Address {
        debug_assert!(Self::space_index(descriptor.get_start()).unwrap() == descriptor.get_index());
        // Each space will call this on exclusive address ranges. It is fine to mutate the high water mark,
        // as each space will update a different index.
        let self_mut = self.mut_self();

        let index = descriptor.get_index();
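        // Claim the requested chunks by bumping this space's high water mark.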
        let rtn = self.inner().high_water[index];
        let extent = chunks << LOG_BYTES_IN_CHUNK;
        self_mut.high_water[index] = rtn + extent;

        if let Some(freelist) = maybe_freelist {
            let Some(rmfl) = freelist.downcast_mut::<RawMemoryFreeList>() else {
                // `Map64` allocates chunks by raising the high water mark, providing a previously
                // uncovered address range to the caller.  Therefore, if the `PageResource` that
                // made the allocation request is backed by a free list, that free list must be
                // grown to accommodate the new chunks.  Currently only `RawMemoryFreeList` can grow.
                panic!("Map64 requires a growable free list implementation (RawMemoryFreeList).");
            };
            rmfl.grow_freelist(conversions::bytes_to_pages_up(extent) as _);
            let base_page = conversions::bytes_to_pages_up(rtn - self.inner().base_address[index]);
            for offset in (0..(chunks * PAGES_IN_CHUNK)).step_by(PAGES_IN_CHUNK) {
                rmfl.set_uncoalescable((base_page + offset) as _);
                /* The 32-bit implementation requires that pages are returned allocated to the caller */
                rmfl.alloc_from_unit(PAGES_IN_CHUNK as _, (base_page + offset) as _);
            }
        }
        rtn
    }

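    // `Map64` does not use discontiguous chunks (each space has its own contiguous region),
    // so the discontiguous-chunk methods below either panic or are unreachable.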
    fn get_next_contiguous_region(&self, _start: Address) -> Address {
        unreachable!()
    }

    fn get_contiguous_region_chunks(&self, _start: Address) -> usize {
        unreachable!()
    }

    fn get_contiguous_region_size(&self, _start: Address) -> usize {
        unreachable!()
    }

    fn get_available_discontiguous_chunks(&self) -> usize {
        panic!("We don't use discontiguous chunks for 64-bit!");
    }

    fn get_chunk_consumer_count(&self) -> usize {
        panic!("We don't use discontiguous chunks for 64-bit!");
    }

    fn free_all_chunks(&self, _any_chunk: Address) {
        unreachable!()
    }

    unsafe fn free_contiguous_chunks(&self, _start: Address) -> usize {
        unreachable!()
    }

    fn finalize_static_space_map(
        &self,
        _from: Address,
        _to: Address,
        _on_discontig_start_determined: &mut dyn FnMut(Address),
    ) {
        // This is only called during the boot process by a single thread.
        // It is fine to get a mutable reference.
        let self_mut: &mut Map64Inner = unsafe { self.mut_self() };

        // Note: When using Map64, the starting address of each space is adjusted as soon as the
        // `RawMemoryFreeList` instance in its underlying `FreeListPageResource` is created.  We no
        // longer need to adjust the starting address here, so we ignore the
        // `_on_discontig_start_determined` callback, which may adjust the starting address.

        self_mut.finalized = true;
    }

    fn is_finalized(&self) -> bool {
        self.inner().finalized
    }

    fn get_descriptor_for_address(&self, address: Address) -> SpaceDescriptor {
        if let Some(index) = Self::space_index(address) {
            self.inner().descriptor_map[index]
        } else {
            SpaceDescriptor::UNINITIALIZED
        }
    }
}

impl Map64 {
    /// # Safety
    ///
    /// The caller needs to guarantee there is no race condition. Either only a single thread
    /// is using this method, or multiple threads are accessing mutually exclusive data (e.g. different indices in arrays).
    /// In other cases, use mut_self_with_sync().
    #[allow(clippy::mut_from_ref)]
    unsafe fn mut_self(&self) -> &mut Map64Inner {
        &mut *self.inner.get()
    }

    fn inner(&self) -> &Map64Inner {
        unsafe { &*self.inner.get() }
    }

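    /// Return the index of the space that contains `addr`, or `None` if the address is outside
    /// the heap range.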
    fn space_index(addr: Address) -> Option<usize> {
        if addr > vm_layout().heap_end {
            return None;
        }
        Some(addr >> vm_layout().space_shift_64())
    }

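    /// Return `true` if `base` is the starting address of a space, i.e. aligned to the space extent.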
    fn is_space_start(base: Address) -> bool {
        (base & !vm_layout().space_mask_64()) == 0
    }
}

impl Default for Map64 {
    fn default() -> Self {
        Self::new()
    }
}