mmtk/util/alloc/bumpallocator.rs

use std::sync::Arc;

use crate::util::Address;

use crate::util::alloc::Allocator;

use crate::policy::space::Space;
use crate::util::conversions::bytes_to_pages_up;
use crate::util::opaque_pointer::*;
use crate::vm::VMBinding;

/// Size of a bump allocator block: 8 pages, i.e. 32 KB with the default 4 KB page size.
const BLOCK_SIZE: usize = 8 << crate::util::constants::LOG_BYTES_IN_PAGE;
const BLOCK_MASK: usize = BLOCK_SIZE - 1;
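// Example of the rounding done in `acquire_block` below: with 32 KB blocks, a 40 KB
// (40960-byte) request becomes `(40960 + BLOCK_MASK) & !BLOCK_MASK` = 65536 bytes, i.e. two blocks.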

/// A bump pointer allocator. It keeps a thread local allocation buffer,
/// and bumps a cursor to allocate from the buffer.
#[repr(C)]
pub struct BumpAllocator<VM: VMBinding> {
    /// [`VMThread`] associated with this allocator instance
    pub tls: VMThread,
    /// Bump-pointer itself.
    pub bump_pointer: BumpPointer,
    /// [`Space`](crate::policy::space::Space) instance associated with this allocator instance.
    space: &'static dyn Space<VM>,
    pub(in crate::util::alloc) context: Arc<AllocatorContext<VM>>,
}

/// A common fast-path bump pointer shared across different allocator implementations
/// that use bump-pointer allocation.
/// A `BumpPointer` is always initialized with cursor = 0, limit = 0, so the first allocation
/// always fails the check of `cursor + size < limit` and goes to the slowpath. A binding
/// can also take advantage of this design to simply zero-initialize a bump pointer.
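///
/// # Example
///
/// A minimal sketch (not an API of this crate) of how a binding-side fastpath might use
/// a `BumpPointer` embedded in its mutator structure; `fastpath_alloc` is a hypothetical
/// helper, and a real binding would also handle alignment and post-allocation actions.
///
/// ```ignore
/// fn fastpath_alloc(bp: &mut BumpPointer, size: usize) -> Option<Address> {
///     let new_cursor = bp.cursor + size;
///     if new_cursor > bp.limit {
///         // A zero-initialized bump pointer (cursor == limit == 0) always ends up here,
///         // so the very first allocation is forwarded to the MMTk slowpath.
///         None
///     } else {
///         let result = bp.cursor;
///         bp.cursor = new_cursor;
///         Some(result)
///     }
/// }
/// ```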
#[repr(C)]
#[derive(Copy, Clone)]
pub struct BumpPointer {
    /// The cursor inside the allocation buffer where the next object will be allocated.
    pub cursor: Address,
    /// The upper bound of the allocation buffer.
    pub limit: Address,
}

impl BumpPointer {
    /// Reset the cursor and limit to the given values.
    pub fn reset(&mut self, start: Address, end: Address) {
        self.cursor = start;
        self.limit = end;
    }
}

impl std::default::Default for BumpPointer {
    /// Defaults to 0,0. In this case, the first
    /// allocation would naturally fail the check
    /// `cursor + size < limit`, and go to the slowpath.
    fn default() -> Self {
        BumpPointer {
            cursor: Address::ZERO,
            limit: Address::ZERO,
        }
    }
}

impl<VM: VMBinding> BumpAllocator<VM> {
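    /// Point the bump pointer at a new buffer: allocation starts at `start` and may proceed
    /// up to, but not beyond, `limit`.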
    pub(crate) fn set_limit(&mut self, start: Address, limit: Address) {
        self.bump_pointer.reset(start, limit);
    }

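    /// Clear the bump pointer (cursor = limit = 0) so that the next allocation takes the slowpath.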
    pub(crate) fn reset(&mut self) {
        let zero = unsafe { Address::zero() };
        self.bump_pointer.reset(zero, zero);
    }

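    /// Reset the bump pointer and associate this allocator with a different space.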
    pub(crate) fn rebind(&mut self, space: &'static dyn Space<VM>) {
        self.reset();
        self.space = space;
    }
}

use crate::util::alloc::allocator::align_allocation_no_fill;
use crate::util::alloc::fill_alignment_gap;

use super::allocator::AllocatorContext;

impl<VM: VMBinding> Allocator<VM> for BumpAllocator<VM> {
    fn get_space(&self) -> &'static dyn Space<VM> {
        self.space
    }

    fn get_context(&self) -> &AllocatorContext<VM> {
        &self.context
    }

    fn does_thread_local_allocation(&self) -> bool {
        true
    }

    fn get_thread_local_buffer_granularity(&self) -> usize {
        BLOCK_SIZE
    }

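    /// Bump-pointer fastpath: align the cursor for the request and, if the new cursor still
    /// fits within the limit, commit it; otherwise fall back to the slowpath.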
    fn alloc(&mut self, size: usize, align: usize, offset: usize) -> Address {
        trace!("alloc");
        let result = align_allocation_no_fill::<VM>(self.bump_pointer.cursor, align, offset);
        let new_cursor = result + size;

        if new_cursor > self.bump_pointer.limit {
            trace!("Thread local buffer used up, go to alloc slow path");
            self.alloc_slow(size, align, offset)
        } else {
            fill_alignment_gap::<VM>(self.bump_pointer.cursor, result);
            self.bump_pointer.cursor = new_cursor;
            trace!(
                "Bump allocation size: {}, result: {}, new_cursor: {}, limit: {}",
                size,
                result,
                self.bump_pointer.cursor,
                self.bump_pointer.limit
            );
            result
        }
    }

    fn alloc_slow_once(&mut self, size: usize, align: usize, offset: usize) -> Address {
        trace!("alloc_slow");
        self.acquire_block(size, align, offset, false)
    }

    /// Slow path for allocation when precise stress testing is enabled.
    /// It works by keeping the limit below the cursor so that the fastpath check always fails.
    /// There are three cases:
    ///  - it acquires a new block if the hard limit has been reached;
    ///  - it allocates an object using the bump-pointer semantics of the
    ///    fastpath if there is sufficient space; and
    ///  - it does not allocate an object but forces a poll for GC if the stress
    ///    factor has been crossed.
    fn alloc_slow_once_precise_stress(
        &mut self,
        size: usize,
        align: usize,
        offset: usize,
        need_poll: bool,
    ) -> Address {
        if need_poll {
            return self.acquire_block(size, align, offset, true);
        }

        trace!("alloc_slow stress_test");
        let result = align_allocation_no_fill::<VM>(self.bump_pointer.cursor, align, offset);
        let new_cursor = result + size;

        // In stress-test mode, the limit lies in [0, block_size) to artificially make the
        // check in the fastpath (alloc()) fail. The real limit is recovered by
        // adding it to the current cursor.
        if new_cursor > self.bump_pointer.cursor + self.bump_pointer.limit.as_usize() {
            self.acquire_block(size, align, offset, true)
        } else {
            fill_alignment_gap::<VM>(self.bump_pointer.cursor, result);
            self.bump_pointer.limit -= new_cursor - self.bump_pointer.cursor;
            self.bump_pointer.cursor = new_cursor;
            trace!(
                "alloc_slow: Bump allocation size: {}, result: {}, new_cursor: {}, limit: {}",
                size,
                result,
                self.bump_pointer.cursor,
                self.bump_pointer.limit
            );
            result
        }
    }

    fn get_tls(&self) -> VMThread {
        self.tls
    }
}

impl<VM: VMBinding> BumpAllocator<VM> {
    pub(crate) fn new(
        tls: VMThread,
        space: &'static dyn Space<VM>,
        context: Arc<AllocatorContext<VM>>,
    ) -> Self {
        BumpAllocator {
            tls,
            bump_pointer: BumpPointer::default(),
            space,
            context,
        }
    }

    fn acquire_block(
        &mut self,
        size: usize,
        align: usize,
        offset: usize,
        stress_test: bool,
    ) -> Address {
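        // If the space reports this request as an obvious out-of-memory case, do not attempt
        // the allocation and return a null address.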
        if self.space.handle_obvious_oom_request(
            self.tls,
            size,
            self.get_context().get_alloc_options(),
        ) {
            return Address::ZERO;
        }

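        // Round the requested size up to a whole number of blocks.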
        let block_size = (size + BLOCK_MASK) & (!BLOCK_MASK);
        let acquired_start = self.space.acquire(
            self.tls,
            bytes_to_pages_up(block_size),
            self.get_context().get_alloc_options(),
        );
        if acquired_start.is_zero() {
            trace!("Failed to acquire a new block");
            acquired_start
        } else {
            trace!(
                "Acquired a new block of size {} with start address {}",
                block_size,
                acquired_start
            );
            if !stress_test {
                self.set_limit(acquired_start, acquired_start + block_size);
                self.alloc(size, align, offset)
            } else {
                // For a stress test, we artificially make the fastpath fail by
                // manipulating the limit as below.
                // The assumption here is that we use an address range such that
                // cursor > block_size always.
                self.set_limit(acquired_start, unsafe { Address::from_usize(block_size) });
                // Note that we have just acquired a new block, so we know we don't have to go
                // through the entire allocation sequence again; we can directly call the slow-path
                // allocation.
                self.alloc_slow_once_precise_stress(size, align, offset, false)
            }
        }
    }
}