use std::sync::Arc;

use crate::util::Address;

use crate::util::alloc::Allocator;

use crate::policy::space::Space;
use crate::util::conversions::bytes_to_pages_up;
use crate::util::opaque_pointer::*;
use crate::vm::VMBinding;

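/// Size of a thread-local allocation buffer: eight pages. Buffer requests in
/// `acquire_block` are rounded up to this granularity.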
const BLOCK_SIZE: usize = 8 << crate::util::constants::LOG_BYTES_IN_PAGE;
const BLOCK_MASK: usize = BLOCK_SIZE - 1;

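/// A bump-pointer allocator. Each allocator owns a thread-local buffer
/// ([`BumpPointer`]) and serves allocations by bumping its cursor; when the
/// buffer is exhausted, it acquires a new one from its [`Space`].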
#[repr(C)]
pub struct BumpAllocator<VM: VMBinding> {
    /// The [`VMThread`] associated with this allocator instance.
    pub tls: VMThread,
    /// The thread-local cursor/limit pair we bump-allocate from.
    pub bump_pointer: BumpPointer,
    /// The [`Space`] this allocator acquires memory from.
    space: &'static dyn Space<VM>,
    pub(in crate::util::alloc) context: Arc<AllocatorContext<VM>>,
}

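/// The cursor/limit pair used for bump allocation. It is `#[repr(C)]` and
/// `Copy` so a binding can embed it directly in its own fast-path code. A
/// zeroed `BumpPointer` (see [`Default`]) always fails the fast-path bounds
/// check, sending the first allocation to the slow path.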
#[repr(C)]
#[derive(Copy, Clone)]
pub struct BumpPointer {
    pub cursor: Address,
    pub limit: Address,
}

impl BumpPointer {
    pub fn reset(&mut self, start: Address, end: Address) {
        self.cursor = start;
        self.limit = end;
    }
}

impl std::default::Default for BumpPointer {
    fn default() -> Self {
        // Start with cursor == limit == 0, so the first allocation fails the
        // fast-path bounds check and takes the slow path to acquire a buffer.
        BumpPointer {
            cursor: Address::ZERO,
            limit: Address::ZERO,
        }
    }
}

impl<VM: VMBinding> BumpAllocator<VM> {
    pub(crate) fn set_limit(&mut self, start: Address, limit: Address) {
        self.bump_pointer.reset(start, limit);
    }

    pub(crate) fn reset(&mut self) {
        let zero = unsafe { Address::zero() };
        self.bump_pointer.reset(zero, zero);
    }

    pub(crate) fn rebind(&mut self, space: &'static dyn Space<VM>) {
        self.reset();
        self.space = space;
    }
}

use crate::util::alloc::allocator::align_allocation_no_fill;
use crate::util::alloc::fill_alignment_gap;

use super::allocator::AllocatorContext;

impl<VM: VMBinding> Allocator<VM> for BumpAllocator<VM> {
    fn get_space(&self) -> &'static dyn Space<VM> {
        self.space
    }

    fn get_context(&self) -> &AllocatorContext<VM> {
        &self.context
    }

    fn does_thread_local_allocation(&self) -> bool {
        true
    }

    fn get_thread_local_buffer_granularity(&self) -> usize {
        BLOCK_SIZE
    }

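    /// Fast path: bump the cursor for an aligned allocation, falling back to
    /// the slow path when the request does not fit in the current buffer.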
    fn alloc(&mut self, size: usize, align: usize, offset: usize) -> Address {
        trace!("alloc");
        let result = align_allocation_no_fill::<VM>(self.bump_pointer.cursor, align, offset);
        let new_cursor = result + size;

        if new_cursor > self.bump_pointer.limit {
            trace!("Thread local buffer used up, go to alloc slow path");
            self.alloc_slow(size, align, offset)
        } else {
            fill_alignment_gap::<VM>(self.bump_pointer.cursor, result);
            self.bump_pointer.cursor = new_cursor;
            trace!(
                "Bump allocation size: {}, result: {}, new_cursor: {}, limit: {}",
                size,
                result,
                self.bump_pointer.cursor,
                self.bump_pointer.limit
            );
            result
        }
    }

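    /// Slow path: acquire a fresh thread-local buffer from the space and retry
    /// the allocation.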
    fn alloc_slow_once(&mut self, size: usize, align: usize, offset: usize) -> Address {
        trace!("alloc_slow");
        self.acquire_block(size, align, offset, false)
    }

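    /// Slow path used when precise stress testing is enabled. In this mode the
    /// fast path is made to always fail (see `acquire_block`), so every
    /// allocation goes through this precise check.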
    fn alloc_slow_once_precise_stress(
        &mut self,
        size: usize,
        align: usize,
        offset: usize,
        need_poll: bool,
    ) -> Address {
        if need_poll {
            return self.acquire_block(size, align, offset, true);
        }

        trace!("alloc_slow stress_test");
        let result = align_allocation_no_fill::<VM>(self.bump_pointer.cursor, align, offset);
        let new_cursor = result + size;

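        // In stress-test mode, `limit` holds the number of bytes remaining in
        // the buffer rather than an end address (see `acquire_block`), so the
        // real end of the buffer is `cursor + limit`.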
        if new_cursor > self.bump_pointer.cursor + self.bump_pointer.limit.as_usize() {
            self.acquire_block(size, align, offset, true)
        } else {
            fill_alignment_gap::<VM>(self.bump_pointer.cursor, result);
            // Deduct the allocated bytes from the remaining-byte count.
            self.bump_pointer.limit -= new_cursor - self.bump_pointer.cursor;
            self.bump_pointer.cursor = new_cursor;
            trace!(
                "alloc_slow: Bump allocation size: {}, result: {}, new_cursor: {}, limit: {}",
                size,
                result,
                self.bump_pointer.cursor,
                self.bump_pointer.limit
            );
            result
        }
    }

    fn get_tls(&self) -> VMThread {
        self.tls
    }
}

impl<VM: VMBinding> BumpAllocator<VM> {
    pub(crate) fn new(
        tls: VMThread,
        space: &'static dyn Space<VM>,
        context: Arc<AllocatorContext<VM>>,
    ) -> Self {
        BumpAllocator {
            tls,
            bump_pointer: BumpPointer::default(),
            space,
            context,
        }
    }

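    /// Acquire a new thread-local buffer from the space and retry the
    /// allocation in it. Returns `Address::ZERO` if the request cannot be
    /// satisfied.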
    fn acquire_block(
        &mut self,
        size: usize,
        align: usize,
        offset: usize,
        stress_test: bool,
    ) -> Address {
        if self.space.handle_obvious_oom_request(
            self.tls,
            size,
            self.get_context().get_alloc_options(),
        ) {
            return Address::ZERO;
        }

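        // Round the request up to the block granularity and acquire that many
        // pages from the space.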
        let block_size = (size + BLOCK_MASK) & (!BLOCK_MASK);
        let acquired_start = self.space.acquire(
            self.tls,
            bytes_to_pages_up(block_size),
            self.get_context().get_alloc_options(),
        );
        if acquired_start.is_zero() {
            trace!("Failed to acquire a new block");
            acquired_start
        } else {
            trace!(
                "Acquired a new block of size {} with start address {}",
                block_size,
                acquired_start
            );
            if !stress_test {
                self.set_limit(acquired_start, acquired_start + block_size);
                self.alloc(size, align, offset)
            } else {
                // For stress testing, make the fast path always fail by
                // storing the buffer size in `limit` instead of an end
                // address: any real cursor is far larger than `block_size`.
                self.set_limit(acquired_start, unsafe { Address::from_usize(block_size) });
                // We have just acquired a block, so we can skip polling and go
                // straight to the precise-stress slow path.
                self.alloc_slow_once_precise_stress(size, align, offset, false)
            }
        }
    }
}