// mmtk/util/heap/pageresource.rs
use crate::util::address::Address;
use crate::util::conversions;
use crate::util::freelist::FreeList;
use crate::util::opaque_pointer::*;
use std::sync::Mutex;

use super::layout::VMMap;
use crate::util::heap::space_descriptor::SpaceDescriptor;
use crate::util::heap::PageAccounting;
use crate::vm::VMBinding;
11
/// A page resource hands out pages to a space on request and tracks how many
/// pages are reserved and committed through the shared [`PageAccounting`]
/// stored in [`CommonPageResource`].
pub trait PageResource<VM: VMBinding>: 'static {
    /// Allocate pages for a space. This simply delegates to
    /// [`PageResource::alloc_pages`], which implementors provide.
    ///
    /// * `space_descriptor`: descriptor of the space the pages belong to.
    /// * `reserved_pages`: pages already reserved for this request.
    /// * `required_pages`: pages the caller actually needs.
    /// * `tls`: the thread performing the allocation.
    fn get_new_pages(
        &self,
        space_descriptor: SpaceDescriptor,
        reserved_pages: usize,
        required_pages: usize,
        tls: VMThread,
    ) -> Result<PRAllocResult, PRAllocFail> {
        self.alloc_pages(space_descriptor, reserved_pages, required_pages, tls)
    }

    /// Record `pages` as reserved in the accounting (no memory is allocated
    /// here) and return the number of pages reserved.
    fn reserve_pages(&self, pages: usize) -> usize {
        self.common().accounting.reserve(pages);
        pages
    }

    /// Undo a reservation previously made via [`PageResource::reserve_pages`].
    fn clear_request(&self, reserved_pages: usize) {
        self.common().accounting.clear_reserved(reserved_pages);
    }

    /// Not implemented by default. The assertion documents that concurrent
    /// zeroing is only meaningful for a contiguous page resource.
    fn update_zeroing_approach(&self, _nontemporal: bool, concurrent: bool) {
        debug_assert!(!concurrent || self.common().contiguous);
        unimplemented!()
    }

    /// Not implemented by default.
    fn skip_concurrent_zeroing(&self) {
        unimplemented!()
    }

    /// Not implemented by default.
    fn trigger_concurrent_zeroing(&self) {
        unimplemented!()
    }

    /// Perform concurrent zeroing. Panics unless overridden by an
    /// implementation that actually supports it.
    fn concurrent_zeroing(&self) {
        panic!("This PageResource does not implement concurrent zeroing")
    }

    /// Actually allocate pages for a space; each implementation supplies its
    /// own allocation policy. Same parameters as
    /// [`PageResource::get_new_pages`].
    fn alloc_pages(
        &self,
        space_descriptor: SpaceDescriptor,
        reserved_pages: usize,
        required_pages: usize,
        tls: VMThread,
    ) -> Result<PRAllocResult, PRAllocFail>;

    /// Commit pages after a successful allocation: reserve the extra pages
    /// beyond what was already reserved, then mark `actual_pages` committed.
    // NOTE(review): `actual_pages - reserved_pages` on `usize` panics in debug
    // builds (and wraps in release) if `actual_pages < reserved_pages`;
    // presumably callers guarantee `actual_pages >= reserved_pages` — confirm.
    fn commit_pages(&self, reserved_pages: usize, actual_pages: usize, _tls: VMThread) {
        let delta = actual_pages - reserved_pages;
        self.common().accounting.reserve(delta);
        self.common().accounting.commit(actual_pages);
    }

    /// Number of pages currently reserved, per the shared accounting.
    fn reserved_pages(&self) -> usize {
        self.common().accounting.get_reserved_pages()
    }

    /// Number of pages currently committed, per the shared accounting.
    fn committed_pages(&self) -> usize {
        self.common().accounting.get_committed_pages()
    }

    /// Number of physical pages still available to this page resource.
    fn get_available_physical_pages(&self) -> usize;

    /// Shared state common to all page resources.
    fn common(&self) -> &CommonPageResource;
    /// Mutable access to the shared common state.
    fn common_mut(&mut self) -> &mut CommonPageResource;
    /// The global VM map used to manage chunks.
    fn vm_map(&self) -> &'static dyn VMMap {
        self.common().vm_map
    }

    /// Hook allowing a page resource to be told its (new) start address.
    // NOTE(review): body appears empty in this view, but original lines
    // 107-109 are elided here — confirm against the full file.
    fn update_discontiguous_start(&mut self, _start: Address) {
    }
}
113
/// Successful result of a page allocation request.
pub struct PRAllocResult {
    /// Start address of the allocated region.
    pub start: Address,
    /// Number of pages in the allocation.
    pub pages: usize,
    /// Whether the allocation brought in a new chunk.
    pub new_chunk: bool,
}
119
/// Marker type returned when a page allocation request cannot be satisfied.
pub struct PRAllocFail;
121
/// State shared by all `PageResource` implementations.
pub struct CommonPageResource {
    /// Reserved/committed page accounting for this resource.
    pub accounting: PageAccounting,
    /// Whether this resource manages a contiguous space.
    pub contiguous: bool,
    /// Whether the space may grow.
    pub growable: bool,

    /// Global map used to allocate and free chunks.
    pub vm_map: &'static dyn VMMap,
    /// Head of this resource's list of discontiguous regions
    /// (`Address::ZERO` when empty); the mutex serializes list updates.
    head_discontiguous_region: Mutex<Address>,
}
130
131impl CommonPageResource {
132 pub fn new(contiguous: bool, growable: bool, vm_map: &'static dyn VMMap) -> CommonPageResource {
133 CommonPageResource {
134 accounting: PageAccounting::new(),
135
136 contiguous,
137 growable,
138 vm_map,
139
140 head_discontiguous_region: Mutex::new(Address::ZERO),
141 }
142 }
143
144 pub fn grow_discontiguous_space(
151 &self,
152 space_descriptor: SpaceDescriptor,
153 chunks: usize,
154 freelist: Option<&mut dyn FreeList>,
155 ) -> Address {
156 let mut head_discontiguous_region = self.head_discontiguous_region.lock().unwrap();
157
158 let new_head: Address = unsafe {
159 self.vm_map.allocate_contiguous_chunks(
160 space_descriptor,
161 chunks,
162 *head_discontiguous_region,
163 freelist,
164 )
165 };
166 if new_head.is_zero() {
167 return Address::ZERO;
168 }
169
170 *head_discontiguous_region = new_head;
171 new_head
172 }
173
174 pub fn release_discontiguous_chunks(&self, chunk: Address) {
177 let mut head_discontiguous_region = self.head_discontiguous_region.lock().unwrap();
178 debug_assert!(chunk == conversions::chunk_align_down(chunk));
179 if chunk == *head_discontiguous_region {
180 *head_discontiguous_region = self.vm_map.get_next_contiguous_region(chunk);
181 }
182 unsafe {
183 self.vm_map.free_contiguous_chunks(chunk);
184 }
185 }
186
187 pub fn release_all_chunks(&self) {
188 let mut head_discontiguous_region = self.head_discontiguous_region.lock().unwrap();
189 self.vm_map.free_all_chunks(*head_discontiguous_region);
190 *head_discontiguous_region = Address::ZERO;
191 }
192
193 pub fn get_head_discontiguous_region(&self) -> Address {
194 *self.head_discontiguous_region.lock().unwrap()
195 }
196}