mmtk/util/heap/regionpageresource.rs

use crate::util::constants::BYTES_IN_PAGE;
use crate::util::heap::layout::VMMap;
use crate::util::heap::pageresource::{CommonPageResource, PRAllocFail, PRAllocResult};
use crate::util::heap::space_descriptor::SpaceDescriptor;
use crate::util::heap::{MonotonePageResource, PageResource};
use crate::util::linear_scan::Region;
use crate::util::object_enum::ObjectEnumerator;
use crate::util::Address;
use crate::util::VMThread;
use crate::vm::VMBinding;
use atomic::Atomic;
use std::sync::atomic::Ordering;
use std::sync::RwLock;

/// A region in a [`RegionPageResource`] and its allocation cursor.
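///
/// The cursor is a bump pointer within the region: bytes in
/// `[region.start(), cursor)` have already been handed out, and new
/// allocations are taken from `[cursor, region.end())`.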
pub struct AllocatedRegion<R: Region> {
    pub region: R,
    cursor: Atomic<Address>,
}

impl<R: Region> AllocatedRegion<R> {
    pub fn cursor(&self) -> Address {
        self.cursor.load(Ordering::Relaxed)
    }

    fn set_cursor(&self, a: Address) {
        self.cursor.store(a, Ordering::Relaxed);
    }
}

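/// State guarded by the page resource's lock: every region allocated so far,
/// plus the index of the region the linear allocation scan is currently at.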
struct Sync<R: Region> {
    all_regions: Vec<AllocatedRegion<R>>,
    next_region: usize,
}

/// A [`PageResource`] which allocates pages from a region-structured heap.
/// It assumes that allocations are much smaller than regions: allocation
/// scans linearly over the regions, and a region which cannot satisfy a
/// request is skipped and not revisited until the next garbage collection.
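///
/// A minimal sketch of the intended usage. The binding type `MyVM`, the region
/// type `MyRegion`, and the values `vm_map`, `descriptor`, `tls`, `region` and
/// `new_top` are placeholders, not items defined in this crate:
///
/// ```ignore
/// // Create a resource backing a discontiguous space.
/// let pr: RegionPageResource<MyVM, MyRegion> = RegionPageResource::new_discontiguous(vm_map);
///
/// // Allocate one page: the request is bump-allocated out of the first region
/// // with enough room, or out of a freshly allocated region.
/// let result = pr.alloc_pages(descriptor, 1, 1, tls)?;
///
/// // After the collector has compacted a region, move its cursor back down and
/// // let the allocation scan revisit all regions from the start.
/// pr.reset_cursor(region, new_top);
/// pr.reset_allocator();
/// ```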
pub struct RegionPageResource<VM: VMBinding, R: Region> {
    mpr: MonotonePageResource<VM>,
    sync: RwLock<Sync<R>>,
}

impl<VM: VMBinding, R: Region + 'static> PageResource<VM> for RegionPageResource<VM, R> {
    fn common(&self) -> &CommonPageResource {
        self.mpr.common()
    }

    fn common_mut(&mut self) -> &mut CommonPageResource {
        self.mpr.common_mut()
    }

    fn update_discontiguous_start(&mut self, start: Address) {
        self.mpr.update_discontiguous_start(start)
    }

    fn alloc_pages(
        &self,
        space_descriptor: SpaceDescriptor,
        reserved_pages: usize,
        required_pages: usize,
        tls: VMThread,
    ) -> Result<PRAllocResult, PRAllocFail> {
        assert!(reserved_pages <= Self::REGION_PAGES);
        assert!(required_pages <= reserved_pages);
        self.alloc(space_descriptor, reserved_pages, required_pages, tls)
    }

    fn get_available_physical_pages(&self) -> usize {
        self.mpr.get_available_physical_pages()
    }
}

impl<VM: VMBinding, R: Region + 'static> RegionPageResource<VM, R> {
    const REGION_PAGES: usize = R::BYTES / BYTES_IN_PAGE;

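    /// Create a page resource for a contiguous space spanning `bytes` bytes from `start`.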
    pub fn new_contiguous(start: Address, bytes: usize, vm_map: &'static dyn VMMap) -> Self {
        Self::new(MonotonePageResource::new_contiguous(start, bytes, vm_map))
    }

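    /// Create a page resource for a discontiguous space.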
    pub fn new_discontiguous(vm_map: &'static dyn VMMap) -> Self {
        Self::new(MonotonePageResource::new_discontiguous(vm_map))
    }

    fn new(mpr: MonotonePageResource<VM>) -> Self {
        Self {
            mpr,
            sync: RwLock::new(Sync {
                all_regions: vec![],
                next_region: 0,
            }),
        }
    }

    fn alloc(
        &self,
        space_descriptor: SpaceDescriptor,
        reserved_pages: usize,
        required_pages: usize,
        tls: VMThread,
    ) -> Result<PRAllocResult, PRAllocFail> {
        let mut b = self.sync.write().unwrap();
        let succeed = |start: Address, new_chunk: bool| {
            Ok(PRAllocResult {
                start,
                pages: required_pages,
                new_chunk,
            })
        };
        let bytes = reserved_pages * BYTES_IN_PAGE;
        // First try to reuse an existing region, scanning from `next_region`.
        // A region which cannot satisfy the request is skipped and will not be
        // revisited until `reset_allocator` is called.
        while b.next_region < b.all_regions.len() {
            let cursor = b.next_region;
            if let Some(address) =
                self.allocate_from_region(&mut b.all_regions[cursor], bytes)
            {
                self.commit_pages(reserved_pages, required_pages, tls);
                return succeed(address, false);
            }
            b.next_region += 1;
        }
        // Otherwise allocate a new region from the underlying monotone resource.
        let PRAllocResult {
            start, new_chunk, ..
        } = self.mpr.alloc_pages(
            space_descriptor,
            Self::REGION_PAGES,
            Self::REGION_PAGES,
            tls,
        )?;
        b.all_regions.push(AllocatedRegion {
            region: R::from_aligned_address(start),
            cursor: Atomic::<Address>::new(start),
        });
        let cursor = b.next_region;
        succeed(
            self.allocate_from_region(&mut b.all_regions[cursor], bytes)
                .unwrap(),
            new_chunk,
        )
    }

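    /// Bump-allocate `bytes` from the region's cursor, or return `None` if the
    /// request does not fit in the remaining space.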
    fn allocate_from_region(
        &self,
        alloc: &mut AllocatedRegion<R>,
        bytes: usize,
    ) -> Option<Address> {
        let free = alloc.cursor();
        if free + bytes > alloc.region.end() {
            None
        } else {
            alloc.set_cursor(free + bytes);
            Some(free)
        }
    }

    /// Reset the allocation cursor for one region.
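    ///
    /// `address` is typically the end of the live data after the collector has
    /// compacted the region. It is aligned up to a page boundary, the pages
    /// between it and the old cursor are released from the page accounting, and
    /// it must not be above the current cursor.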
    pub fn reset_cursor(&self, alloc: &AllocatedRegion<R>, address: Address) {
        let old = alloc.cursor();
        let new = address.align_up(BYTES_IN_PAGE);
        let pages = (old - new) / BYTES_IN_PAGE;
        self.common().accounting.release(pages);
        alloc.set_cursor(new);
    }

    /// Reset the allocator state after a collection, so that the allocator will
    /// revisit regions which the garbage collector has compacted.
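    ///
    /// This only rewinds the scan over regions; per-region cursors are adjusted
    /// separately with [`Self::reset_cursor`].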
    pub fn reset_allocator(&self) {
        self.sync.write().unwrap().next_region = 0;
    }

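    /// Visit the allocated address range of every region, i.e. from each
    /// region's start up to its current cursor.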
    pub fn enumerate(&self, enumerator: &mut dyn ObjectEnumerator) {
        let sync = self.sync.read().unwrap();
        for alloc in sync.all_regions.iter() {
            enumerator.visit_address_range(alloc.region.start(), alloc.cursor());
        }
    }

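    /// Run `f` on the list of all regions allocated so far, holding the
    /// resource's lock for the duration of the call.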
    pub fn with_regions<T>(&self, f: &mut impl FnMut(&Vec<AllocatedRegion<R>>) -> T) -> T {
        let sync = self.sync.read().unwrap();
        f(&sync.all_regions)
    }

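    /// Call `enumerator` on each region allocated so far.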
    pub fn enumerate_regions(&self, enumerator: &mut impl FnMut(&AllocatedRegion<R>)) {
        let sync = self.sync.read().unwrap();
        for alloc in sync.all_regions.iter() {
            enumerator(alloc);
        }
    }
}