mmtk/util/heap/monotonepageresource.rs

use super::layout::vm_layout::{BYTES_IN_CHUNK, PAGES_IN_CHUNK};
use crate::policy::space::required_chunks;
use crate::util::address::Address;
use crate::util::constants::BYTES_IN_PAGE;
use crate::util::conversions::*;
use std::ops::Range;
use std::sync::{Mutex, MutexGuard};

use crate::util::alloc::embedded_meta_data::*;
use crate::util::heap::layout::vm_layout::LOG_BYTES_IN_CHUNK;
use crate::util::heap::pageresource::CommonPageResource;
use crate::util::opaque_pointer::*;

use super::layout::VMMap;
use super::pageresource::{PRAllocFail, PRAllocResult};
use super::PageResource;
use crate::util::heap::space_descriptor::SpaceDescriptor;
use crate::vm::VMBinding;
use std::marker::PhantomData;

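/// A "monotone" page resource: pages are allocated by bumping a cursor through the
/// current chunk, and are only reclaimed in bulk via `reset` / `reset_cursor`.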
pub struct MonotonePageResource<VM: VMBinding> {
    common: CommonPageResource,
    sync: Mutex<MonotonePageResourceSync>,
    _p: PhantomData<VM>,
}

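/// Mutable state of a [`MonotonePageResource`], protected by the mutex in `sync`.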
struct MonotonePageResourceSync {
    /** Pointer to the next block to be allocated. */
    cursor: Address,
    /** The limit of the currently allocated address space. */
    sentinel: Address,
    /** Base address of the current chunk of addresses. */
    current_chunk: Address,
    conditional: MonotonePageResourceConditional,
}

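/// State that only exists for one of the two kinds of monotone page resource:
/// contiguous spaces track their start address and zeroing progress, while
/// discontiguous spaces need no extra state.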
pub enum MonotonePageResourceConditional {
    Contiguous {
        start: Address,
        /** Current frontier of zeroing, in a separate zeroing thread */
        zeroing_cursor: Address,
        /** Current limit of zeroing.  If zeroing_cursor < zeroing_sentinel, zeroing is still happening. */
        zeroing_sentinel: Address,
    },
    Discontiguous,
}

impl<VM: VMBinding> PageResource<VM> for MonotonePageResource<VM> {
    fn common(&self) -> &CommonPageResource {
        &self.common
    }
    fn common_mut(&mut self) -> &mut CommonPageResource {
        &mut self.common
    }

    fn reserve_pages(&self, pages: usize) -> usize {
        self.common().accounting.reserve(pages);
        pages
    }

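    // Available pages are those between the cursor and the sentinel; a discontiguous
    // space can additionally draw on whatever chunks the VM map still has free.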
    fn get_available_physical_pages(&self) -> usize {
        let sync = self.sync.lock().unwrap();
        let mut rtn = bytes_to_pages_up(sync.sentinel - sync.cursor);
        if !self.common.contiguous {
            rtn += self.common.vm_map.get_available_discontiguous_chunks() * PAGES_IN_CHUNK;
        }
        rtn
    }

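    // Bump-allocate `required_pages` pages at the cursor. A discontiguous space that
    // would run past its sentinel first asks the VM map for more chunks; if the request
    // still does not fit, the allocation fails with `PRAllocFail`.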
    fn alloc_pages(
        &self,
        space_descriptor: SpaceDescriptor,
        reserved_pages: usize,
        required_pages: usize,
        tls: VMThread,
    ) -> Result<PRAllocResult, PRAllocFail> {
        debug!(
            "In MonotonePageResource, reserved_pages = {}, required_pages = {}",
            reserved_pages, required_pages
        );
        let mut new_chunk = false;
        let mut sync = self.sync.lock().unwrap();
        let mut rtn = sync.cursor;
        debug!(
            "cursor = {}, sentinel = {}, current_chunk = {}",
            sync.cursor, sync.sentinel, sync.current_chunk
        );

        if cfg!(debug_assertions) {
            /*
             * Cursor should always be zero, or somewhere in the current chunk.  If we have just
             * allocated exactly enough pages to exhaust the current chunk, then cursor can point
             * to the next chunk.
             */
            if sync.current_chunk > sync.cursor
                || (chunk_align_down(sync.cursor) != sync.current_chunk
                    && chunk_align_down(sync.cursor) != sync.current_chunk + BYTES_IN_CHUNK)
            {
                self.log_chunk_fields(
                    &sync,
                    space_descriptor,
                    "MonotonePageResource.alloc_pages:fail",
                );
            }
            assert!(sync.current_chunk <= sync.cursor);
            assert!(
                sync.cursor.is_zero()
                    || chunk_align_down(sync.cursor) == sync.current_chunk
                    || chunk_align_down(sync.cursor) == (sync.current_chunk + BYTES_IN_CHUNK)
            );
        }

        let bytes = pages_to_bytes(required_pages);
        debug!("bytes={}", bytes);
        let mut tmp = sync.cursor + bytes;
        debug!("tmp={:?}", tmp);

        if !self.common().contiguous && tmp > sync.sentinel {
            /* we're out of virtual memory within our discontiguous region, so ask for more */
            let required_chunks = required_chunks(required_pages);
            sync.current_chunk =
                self.common
                    .grow_discontiguous_space(space_descriptor, required_chunks, None); // Returns zero on failure
            sync.cursor = sync.current_chunk;
            sync.sentinel = sync.cursor
                + if sync.current_chunk.is_zero() {
                    0
                } else {
                    required_chunks << LOG_BYTES_IN_CHUNK
                };
            //println!("{} {}->{}", self.common.space.unwrap().get_name(), sync.cursor, sync.sentinel);
            rtn = sync.cursor;
            tmp = sync.cursor + bytes;
            new_chunk = true;
        }

        debug_assert!(rtn >= sync.cursor && rtn < sync.cursor + bytes);
        if tmp > sync.sentinel {
            //debug!("tmp={:?} > sync.sentinel={:?}", tmp, sync.sentinel);
            Result::Err(PRAllocFail)
        } else {
            //debug!("tmp={:?} <= sync.sentinel={:?}", tmp, sync.sentinel);
            sync.cursor = tmp;
            debug!("update cursor = {}", tmp);

            /* In a contiguous space we can bump along into the next chunk, so preserve the current_chunk invariant */
            if self.common().contiguous && chunk_align_down(sync.cursor) != sync.current_chunk {
                debug_assert!(
                    chunk_align_down(sync.cursor) > sync.current_chunk,
                    "Not monotonic.  chunk_align_down(sync.cursor): {}, sync.current_chunk: {}",
                    chunk_align_down(sync.cursor),
                    sync.current_chunk,
                );
                sync.current_chunk = chunk_align_down(sync.cursor);
            }
            self.commit_pages(reserved_pages, required_pages, tls);

            Result::Ok(PRAllocResult {
                start: rtn,
                pages: required_pages,
                new_chunk,
            })
        }
    }
}

impl<VM: VMBinding> MonotonePageResource<VM> {
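    /// Create a monotone page resource over a contiguous range of `bytes` bytes starting at `start`.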
    pub fn new_contiguous(start: Address, bytes: usize, vm_map: &'static dyn VMMap) -> Self {
        let sentinel = start + bytes;

        MonotonePageResource {
            common: CommonPageResource::new(true, cfg!(target_pointer_width = "64"), vm_map),
            sync: Mutex::new(MonotonePageResourceSync {
                cursor: start,
                current_chunk: chunk_align_down(start),
                sentinel,
                conditional: MonotonePageResourceConditional::Contiguous {
                    start,
                    zeroing_cursor: sentinel,
                    zeroing_sentinel: start,
                },
            }),
            _p: PhantomData,
        }
    }

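    /// Create a monotone page resource for a discontiguous space. Chunks are obtained
    /// from the VM map lazily, on the first allocation that needs them.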
    pub fn new_discontiguous(vm_map: &'static dyn VMMap) -> Self {
        MonotonePageResource {
            common: CommonPageResource::new(false, true, vm_map),
            sync: Mutex::new(MonotonePageResourceSync {
                cursor: unsafe { Address::zero() },
                current_chunk: unsafe { Address::zero() },
                sentinel: unsafe { Address::zero() },
                conditional: MonotonePageResourceConditional::Discontiguous,
            }),
            _p: PhantomData,
        }
    }

    /// Get the highwater mark of the current monotone space.
    pub fn cursor(&self) -> Address {
        self.sync.lock().unwrap().cursor
    }

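    /// Debug helper that dumps the cursor/current_chunk relationship. It takes the
    /// already-locked state by reference so it can be called from `alloc_pages`
    /// while the lock is held.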
    fn log_chunk_fields(
        &self,
        sync: &MonotonePageResourceSync,
        space_descriptor: SpaceDescriptor,
        site: &str,
    ) {
        debug!(
            "[{:?}]{}: cursor={}, current_chunk={}, delta={}",
            space_descriptor,
            site,
            sync.cursor,
            sync.current_chunk,
            sync.cursor - sync.current_chunk
        );
    }

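    /// Align `addr` down to the start of its embedded-metadata region.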
    fn get_region_start(addr: Address) -> Address {
        addr.align_down(BYTES_IN_REGION)
    }

    /// # Safety
    /// TODO: I am not sure why this is unsafe.
    pub unsafe fn reset(&self) {
        let mut guard = self.sync.lock().unwrap();
        self.common().accounting.reset();
        self.release_pages(&mut guard);
        drop(guard);
    }

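    /// Get the base address of the chunk currently being bump-allocated into.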
    pub unsafe fn get_current_chunk(&self) -> Address {
        let guard = self.sync.lock().unwrap();
        guard.current_chunk
    }

    /*
     * Original Java implementation, kept for reference:
     *
     * Release all pages associated with this page resource, optionally
     * zeroing on release and optionally memory protecting on release.
     *
     * @Inline
     * private void releasePages() {
     *     if (contiguous) {
     *         // TODO: We will perform unnecessary zeroing if the nursery size has decreased.
     *         if (zeroConcurrent) {
     *             // Wait for current zeroing to finish.
     *             while (zeroingCursor.LT(zeroingSentinel)) { }
     *         }
     *         // Reset zeroing region.
     *         if (cursor.GT(zeroingSentinel)) {
     *             zeroingSentinel = cursor;
     *         }
     *         zeroingCursor = start;
     *         cursor = start;
     *         currentChunk = Conversions.chunkAlign(start, true);
     *     } else { /* Not contiguous */
     *         if (!cursor.isZero()) {
     *             do {
     *                 Extent bytes = cursor.diff(currentChunk).toWord().toExtent();
     *                 releasePages(currentChunk, bytes);
     *             } while (moveToNextChunk());
     *
     *             currentChunk = Address.zero();
     *             sentinel = Address.zero();
     *             cursor = Address.zero();
     *             space.releaseAllChunks();
     *         }
     *     }
     * }
     */

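    /// Roll the cursor back to `top` and recompute page accounting from the bytes that
    /// remain live. For a contiguous space this only moves the cursor and current chunk;
    /// for a discontiguous space, every chunk region above the one containing `top` is
    /// released back to the VM map.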
    pub fn reset_cursor(&self, top: Address) {
        if self.common.contiguous {
            let mut guard = self.sync.lock().unwrap();
            let cursor = top.align_up(BYTES_IN_PAGE);
            let chunk = chunk_align_down(top);
            let space_start = match guard.conditional {
                MonotonePageResourceConditional::Contiguous { start, .. } => start,
                _ => unreachable!(),
            };
            let pages = bytes_to_pages_up(top - space_start);
            self.common.accounting.reset();
            self.common.accounting.reserve_and_commit(pages);
            guard.current_chunk = chunk;
            guard.cursor = cursor;
        } else {
            let mut chunk_start = self.common.get_head_discontiguous_region();
            let mut release_regions = false;
            let mut live_size = 0;
            while !chunk_start.is_zero() {
                let chunk_end = chunk_start
                    + (self.common.vm_map.get_contiguous_region_chunks(chunk_start)
                        << LOG_BYTES_IN_CHUNK);
                let next_chunk_start = self.common.vm_map.get_next_contiguous_region(chunk_start);
                if top >= chunk_start && top < chunk_end {
                    // This is the last live chunk
                    debug_assert!(!release_regions);
                    let mut guard = self.sync.lock().unwrap();
                    guard.current_chunk = chunk_start;
                    guard.sentinel = chunk_end;
                    guard.cursor = top.align_up(BYTES_IN_PAGE);
                    live_size += top - chunk_start;
                    // Release all the remaining regions
                    release_regions = true;
                } else if release_regions {
                    // release this region
                    self.common.release_discontiguous_chunks(chunk_start);
                } else {
                    // keep this live region
                    live_size += chunk_end - chunk_start;
                }
                chunk_start = next_chunk_start;
            }
            let pages = bytes_to_pages_up(live_size);
            self.common.accounting.reset();
            self.common.accounting.reserve_and_commit(pages);
        }
    }

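    /// Reset the cursor to the start of the space and, for a discontiguous space, return
    /// every chunk to the VM map. Called from `reset` with the lock already held.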
    unsafe fn release_pages(&self, guard: &mut MutexGuard<MonotonePageResourceSync>) {
        // TODO: concurrent zeroing
        if self.common().contiguous {
            guard.cursor = match guard.conditional {
                MonotonePageResourceConditional::Contiguous { start, .. } => start,
                _ => unreachable!(),
            };
            guard.current_chunk = guard.cursor;
        } else if !guard.cursor.is_zero() {
            let bytes = guard.cursor - guard.current_chunk;
            self.release_pages_extent(guard.current_chunk, bytes);
            while self.move_to_next_chunk(guard) {
                let bytes = guard.cursor - guard.current_chunk;
                self.release_pages_extent(guard.current_chunk, bytes);
            }

            guard.current_chunk = Address::zero();
            guard.sentinel = Address::zero();
            guard.cursor = Address::zero();
            self.common.release_all_chunks();
        }
    }

    /// Iterate over all contiguous memory regions in this space.
    /// For a contiguous space, this iterator yields exactly once, returning a single
    /// region from the start of the space to the chunk-aligned cursor.
    pub fn iterate_allocated_regions(&self) -> impl Iterator<Item = (Address, usize)> + '_ {
        struct Iter<'a, VM: VMBinding> {
            pr: &'a MonotonePageResource<VM>,
            contiguous_space: Option<Range<Address>>,
            discontiguous_start: Address,
        }
        impl<VM: VMBinding> Iterator for Iter<'_, VM> {
            type Item = (Address, usize);
            fn next(&mut self) -> Option<Self::Item> {
                if let Some(range) = self.contiguous_space.take() {
                    Some((range.start, range.end - range.start))
                } else if self.discontiguous_start.is_zero() {
                    None
                } else {
                    let start = self.discontiguous_start;
                    self.discontiguous_start = self.pr.vm_map().get_next_contiguous_region(start);

                    let contiguous_region_size = self.pr.vm_map().get_contiguous_region_size(start);
                    let cursor = self.pr.cursor();
                    let size = if start < cursor && cursor < start + contiguous_region_size {
                        // If the current cursor is within the current discontiguous region,
                        // then return the size up to the cursor.
                        // This is sufficient for sweeping the memory and clearing side metadata.
                        // Note that if cursor == start,
                        // it means the cursor is at the end of the previous chunk.
                        cursor - start
                    } else {
                        contiguous_region_size
                    };
                    Some((start, size))
                }
            }
        }
        let sync = self.sync.lock().unwrap();
        match sync.conditional {
            MonotonePageResourceConditional::Contiguous { start, .. } => {
                let cursor = sync.cursor.align_up(BYTES_IN_CHUNK);
                Iter {
                    pr: self,
                    contiguous_space: Some(start..cursor),
                    discontiguous_start: Address::ZERO,
                }
            }
            MonotonePageResourceConditional::Discontiguous => {
                let discontiguous_start = self.common.get_head_discontiguous_region();
                Iter {
                    pr: self,
                    contiguous_space: None,
                    discontiguous_start,
                }
            }
        }
    }

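    /// Hook for per-extent release work (zeroing, protection, and event tracing are still
    /// FIXMEs below); currently it only checks that the extent is page-aligned.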
    fn release_pages_extent(&self, _first: Address, bytes: usize) {
        let pages = bytes_to_pages_up(bytes);
        debug_assert!(bytes == pages_to_bytes(pages));
        // FIXME ZERO_PAGES_ON_RELEASE
        // FIXME Options.protectOnRelease
        // FIXME VM.events.tracePageReleased
    }

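    /// Advance `current_chunk` to the next contiguous region registered with the VM map
    /// and place the cursor at the end of that region. Returns `false` when there are no
    /// more regions.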
    fn move_to_next_chunk(&self, guard: &mut MutexGuard<MonotonePageResourceSync>) -> bool {
        guard.current_chunk = self
            .vm_map()
            .get_next_contiguous_region(guard.current_chunk);
        if guard.current_chunk.is_zero() {
            false
        } else {
            guard.cursor = guard.current_chunk
                + self
                    .vm_map()
                    .get_contiguous_region_size(guard.current_chunk);
            true
        }
    }
}