mmtk/util/heap/pageresource.rs

use crate::util::address::Address;
use crate::util::conversions;
use crate::util::freelist::FreeList;
use crate::util::opaque_pointer::*;
use std::sync::Mutex;

use super::layout::VMMap;
use crate::util::heap::space_descriptor::SpaceDescriptor;
use crate::util::heap::PageAccounting;
use crate::vm::VMBinding;

pub trait PageResource<VM: VMBinding>: 'static {
    /// Allocate pages from this resource.
    /// Simply bump the cursor, and fail if we hit the sentinel.
    /// Return the start of the first page if successful, or `PRAllocFail` on failure.
    fn get_new_pages(
        &self,
        space_descriptor: SpaceDescriptor,
        reserved_pages: usize,
        required_pages: usize,
        tls: VMThread,
    ) -> Result<PRAllocResult, PRAllocFail> {
        self.alloc_pages(space_descriptor, reserved_pages, required_pages, tls)
    }

    // XXX: In the original code reserve_pages & clear_request explicitly
    //      acquired a lock.
    fn reserve_pages(&self, pages: usize) -> usize {
        self.common().accounting.reserve(pages);
        pages
    }

    fn clear_request(&self, reserved_pages: usize) {
        self.common().accounting.clear_reserved(reserved_pages);
    }

    fn update_zeroing_approach(&self, _nontemporal: bool, concurrent: bool) {
        debug_assert!(!concurrent || self.common().contiguous);
        unimplemented!()
    }

    fn skip_concurrent_zeroing(&self) {
        unimplemented!()
    }

    fn trigger_concurrent_zeroing(&self) {
        unimplemented!()
    }

    fn concurrent_zeroing(&self) {
        panic!("This PageResource does not implement concurrent zeroing")
    }

    fn alloc_pages(
        &self,
        space_descriptor: SpaceDescriptor,
        reserved_pages: usize,
        required_pages: usize,
        tls: VMThread,
    ) -> Result<PRAllocResult, PRAllocFail>;

    /// Commit pages to the page budget.  This is called after
    /// successfully determining that the request can be satisfied by
    /// both the page budget and virtual memory.  This simply accounts
    /// for the discrepancy between `committed` and `reserved` while
    /// the request was pending.
    ///
    /// This *MUST* be called by each `PageResource` implementation during
    /// `alloc_pages`, and the caller must hold the lock.
    fn commit_pages(&self, reserved_pages: usize, actual_pages: usize, _tls: VMThread) {
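        // Reserve any pages allocated beyond the original reservation, then
        // commit the full number of pages actually allocated.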
        let delta = actual_pages - reserved_pages;
        self.common().accounting.reserve(delta);
        self.common().accounting.commit(actual_pages);
    }

    fn reserved_pages(&self) -> usize {
        self.common().accounting.get_reserved_pages()
    }

    fn committed_pages(&self) -> usize {
        self.common().accounting.get_committed_pages()
    }

    /// Return the number of available physical pages provided by this resource. This includes all
    /// pages currently unused by this resource. If the resource is using a discontiguous space, it
    /// also includes the currently unassigned discontiguous space.
    ///
    /// Note: This just considers physical pages (i.e. virtual memory pages allocated for use by
    /// this resource). This calculation is orthogonal to and does not consider any restrictions on
    /// the number of pages this resource may actually use at any time (i.e. the number of
    /// committed and reserved pages).
    ///
    /// Note: The calculation is made on the assumption that all space that could be assigned to
    /// this resource would be assigned to this resource (even though the unused discontiguous
    /// space could just as likely be assigned to another competing resource).
    fn get_available_physical_pages(&self) -> usize;

    fn common(&self) -> &CommonPageResource;
    fn common_mut(&mut self) -> &mut CommonPageResource;
    fn vm_map(&self) -> &'static dyn VMMap {
        self.common().vm_map
    }

    /// Some page resources need to record the start address.
    /// This method will be called after the start address of the discontiguous region is determined.
    /// `start` is the computed start address.  By default, this does nothing.
    fn update_discontiguous_start(&mut self, _start: Address) {
        // Do nothing.
    }
}
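
// Illustrative sketch of the allocation protocol (an assumption based on the methods
// above, not code from this module): a caller typically reserves pages first, then
// asks for new pages, and clears the reservation if the request fails; successful
// `alloc_pages` implementations are expected to have called `commit_pages` already.
// `pr`, `descriptor`, `pages`, `tls` and `use_pages` below are placeholder names.
//
//     let reserved = pr.reserve_pages(pages);
//     match pr.get_new_pages(descriptor, reserved, pages, tls) {
//         Ok(result) => use_pages(result.start, result.pages),
//         Err(PRAllocFail) => pr.clear_request(reserved),
//     }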

/// The result of a successful page allocation.
pub struct PRAllocResult {
    /// The start address of the allocated pages.
    pub start: Address,
    /// The number of pages actually allocated.
    pub pages: usize,
    /// Whether the allocation acquired a new chunk.
    pub new_chunk: bool,
}

/// Indicates that a page allocation request could not be satisfied.
pub struct PRAllocFail;

/// State shared by all page resource implementations.
pub struct CommonPageResource {
    pub accounting: PageAccounting,
    pub contiguous: bool,
    pub growable: bool,

    pub vm_map: &'static dyn VMMap,
    /// Head of the list of contiguous regions assigned to this resource when it
    /// manages a discontiguous space.
    head_discontiguous_region: Mutex<Address>,
}

impl CommonPageResource {
    pub fn new(contiguous: bool, growable: bool, vm_map: &'static dyn VMMap) -> CommonPageResource {
        CommonPageResource {
            accounting: PageAccounting::new(),

            contiguous,
            growable,
            vm_map,

            head_discontiguous_region: Mutex::new(Address::ZERO),
        }
    }
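
    // A concrete page resource would typically embed this struct as a field and
    // construct it in its own constructor, e.g. (placeholder values, for illustration):
    //     common: CommonPageResource::new(/* contiguous */ false, /* growable */ true, vm_map),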

    /// Extend the virtual memory associated with a particular discontiguous
    /// space.  This simply involves requesting a suitable number of chunks
    /// from the pool of chunks available to discontiguous spaces.
    ///
    /// If the concrete page resource uses a `FreeList`, it should pass it
    /// via `freelist`; otherwise, pass `None`.
    pub fn grow_discontiguous_space(
        &self,
        space_descriptor: SpaceDescriptor,
        chunks: usize,
        freelist: Option<&mut dyn FreeList>,
    ) -> Address {
        let mut head_discontiguous_region = self.head_discontiguous_region.lock().unwrap();

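        // Request `chunks` contiguous chunks from the VM map. The current head is
        // passed so the new region can be chained ahead of the existing regions
        // (cf. `get_next_contiguous_region` in `release_discontiguous_chunks` below).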
        let new_head: Address = unsafe {
            self.vm_map.allocate_contiguous_chunks(
                space_descriptor,
                chunks,
                *head_discontiguous_region,
                freelist,
            )
        };
        if new_head.is_zero() {
            return Address::ZERO;
        }

        *head_discontiguous_region = new_head;
        new_head
    }

    /// Release one or more contiguous chunks associated with a discontiguous
    /// space.
    pub fn release_discontiguous_chunks(&self, chunk: Address) {
        let mut head_discontiguous_region = self.head_discontiguous_region.lock().unwrap();
        debug_assert!(chunk == conversions::chunk_align_down(chunk));
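        // If the chunk being released is the current head, advance the head to the
        // next contiguous region before freeing it.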
        if chunk == *head_discontiguous_region {
            *head_discontiguous_region = self.vm_map.get_next_contiguous_region(chunk);
        }
        unsafe {
            self.vm_map.free_contiguous_chunks(chunk);
        }
    }

    pub fn release_all_chunks(&self) {
        let mut head_discontiguous_region = self.head_discontiguous_region.lock().unwrap();
        self.vm_map.free_all_chunks(*head_discontiguous_region);
        *head_discontiguous_region = Address::ZERO;
    }

    pub fn get_head_discontiguous_region(&self) -> Address {
        *self.head_discontiguous_region.lock().unwrap()
    }
}