mmtk/util/alloc/markcompact_allocator.rs

use std::sync::Arc;

use super::allocator::AllocatorContext;
use super::BumpAllocator;
use crate::policy::space::Space;
use crate::util::alloc::Allocator;
use crate::util::opaque_pointer::*;
use crate::util::Address;
use crate::vm::VMBinding;

/// A thin wrapper (a specific implementation) of the bump allocator that
/// reserves extra header bytes when allocating.
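///
/// A minimal sketch of the allocation flow (the `24, 8, 0` arguments are
/// illustrative; the real header size comes from
/// `MarkCompactSpace::HEADER_RESERVED_IN_BYTES`):
///
/// ```ignore
/// // Bumps the cursor by `size + HEADER_RESERVED_IN_BYTES`, then returns
/// // the address just past the reserved header as the object start.
/// let object_start = markcompact_allocator.alloc(24, 8, 0);
/// ```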
#[repr(C)]
pub struct MarkCompactAllocator<VM: VMBinding> {
    pub(in crate::util::alloc) bump_allocator: BumpAllocator<VM>,
}

impl<VM: VMBinding> MarkCompactAllocator<VM> {
    /// Set the cursor and limit of the underlying bump allocator.
    pub(crate) fn set_limit(&mut self, cursor: Address, limit: Address) {
        self.bump_allocator.set_limit(cursor, limit);
    }

    /// Reset the underlying bump allocator.
    pub(crate) fn reset(&mut self) {
        self.bump_allocator.reset();
    }

    /// Rebind the underlying bump allocator to a different space.
    pub(crate) fn rebind(&mut self, space: &'static dyn Space<VM>) {
        self.bump_allocator.rebind(space);
    }
}
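
// A hedged note on intended use (an assumption, not stated in this file):
// these pub(crate) helpers let the mark-compact plan drive the allocator
// across GC phases, e.g. resetting a mutator's buffer after compaction has
// moved objects:
//
//     allocator.reset(); // assumed call site: plan/mutator release code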

impl<VM: VMBinding> Allocator<VM> for MarkCompactAllocator<VM> {
    fn get_space(&self) -> &'static dyn Space<VM> {
        self.bump_allocator.get_space()
    }

    fn get_context(&self) -> &AllocatorContext<VM> {
        &self.bump_allocator.context
    }

    fn get_tls(&self) -> VMThread {
        self.bump_allocator.get_tls()
    }

    fn does_thread_local_allocation(&self) -> bool {
        true
    }

    fn get_thread_local_buffer_granularity(&self) -> usize {
        self.bump_allocator.get_thread_local_buffer_granularity()
    }

    fn alloc(&mut self, size: usize, align: usize, offset: usize) -> Address {
        let rtn = self
            .bump_allocator
            .alloc(size + Self::HEADER_RESERVED_IN_BYTES, align, offset);
        // Check if the result is valid and return the actual object start address
        // Note that `rtn` can be null in the case of OOM
        if !rtn.is_zero() {
            rtn + Self::HEADER_RESERVED_IN_BYTES
        } else {
            rtn
        }
    }

    fn alloc_slow_once(&mut self, size: usize, align: usize, offset: usize) -> Address {
        trace!("alloc_slow");
        self.bump_allocator.alloc_slow_once(size, align, offset)
    }

    /// Slow path for allocation if precise stress testing has been enabled.
    /// It works by manipulating the limit to be always below the cursor.
    /// There are three different cases:
    ///  - acquires a new block if the hard limit has been met;
    ///  - allocates an object using the bump pointer semantics from the
    ///    fastpath if there is sufficient space; and
    ///  - does not allocate an object but forces a poll for GC if the stress
    ///    factor has been crossed.
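    ///
    /// Because the limit stays below the cursor, the fastpath bounds check
    /// always fails, so every allocation is routed through this slow path,
    /// where the stress poll can happen (delegated to the bump allocator).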
    fn alloc_slow_once_precise_stress(
        &mut self,
        size: usize,
        align: usize,
        offset: usize,
        need_poll: bool,
    ) -> Address {
        self.bump_allocator
            .alloc_slow_once_precise_stress(size, align, offset, need_poll)
    }
}

impl<VM: VMBinding> MarkCompactAllocator<VM> {
    /// The number of bytes that the allocator reserves for its own header.
    pub const HEADER_RESERVED_IN_BYTES: usize =
        crate::policy::markcompactspace::MarkCompactSpace::<VM>::HEADER_RESERVED_IN_BYTES;

    /// Create a new mark-compact allocator for the given thread, bound to the
    /// given space.
    pub(crate) fn new(
        tls: VMThread,
        space: &'static dyn Space<VM>,
        context: Arc<AllocatorContext<VM>>,
    ) -> Self {
        MarkCompactAllocator {
            bump_allocator: BumpAllocator::new(tls, space, context),
        }
    }
}
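
// A minimal construction sketch (hedged: the variable names and surrounding
// setup are hypothetical, not from this file; allocators are normally created
// through the mutator/plan machinery rather than by hand):
//
//     let mut allocator = MarkCompactAllocator::<VM>::new(tls, space, context);
//     let object_start = allocator.alloc(size, align, offset);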