use std::sync::atomic::Ordering;
use std::sync::Arc;

use super::allocator::{align_allocation_no_fill, fill_alignment_gap, AllocatorContext};
use super::BumpPointer;
use crate::policy::immix::line::*;
use crate::policy::immix::ImmixSpace;
use crate::policy::space::Space;
use crate::util::alloc::allocator::get_maximum_aligned_size;
use crate::util::alloc::Allocator;
use crate::util::linear_scan::Region;
use crate::util::opaque_pointer::VMThread;
use crate::util::rust_util::unlikely;
use crate::util::Address;
use crate::vm::*;

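/// A thread-local bump-pointer allocator for the Immix space.
///
/// Normal allocations (no larger than a line once aligned) are served from
/// `bump_pointer`; larger requests go through `large_bump_pointer` (overflow
/// allocation). The slow path first tries to recycle free lines in reusable
/// blocks before asking the space for clean blocks.
///
/// A minimal usage sketch (illustrative only; the `immix_allocator` handle is
/// hypothetical, and how a binding obtains and drives the allocator is outside
/// this file):
///
/// ```ignore
/// let addr = immix_allocator.alloc(size, align, offset);
/// if addr.is_zero() {
///     // The allocation did not succeed, e.g. because a GC was triggered.
/// }
/// ```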
#[repr(C)]
pub struct ImmixAllocator<VM: VMBinding> {
    /// The [`VMThread`] associated with this allocator instance.
    pub tls: VMThread,
    /// Bump pointer for normal (line-sized or smaller) allocations.
    pub bump_pointer: BumpPointer,
    /// The [`ImmixSpace`] this allocator allocates into.
    space: &'static ImmixSpace<VM>,
    /// Shared allocator context (e.g. runtime options).
    context: Arc<AllocatorContext<VM>>,
    hot: bool,
    /// Whether this is a copy allocator used by the GC rather than a mutator allocator.
    copy: bool,
    /// Bump pointer for overflow (multi-line) allocations.
    pub(in crate::util::alloc) large_bump_pointer: BumpPointer,
    /// Whether the current slow-path request is for the large bump pointer.
    request_for_large: bool,
    /// The line from which the next search for recyclable lines resumes.
    line: Option<Line>,
}

impl<VM: VMBinding> ImmixAllocator<VM> {
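    /// Reset the allocator's thread-local state: zero both bump pointers and
    /// forget the current recyclable line, so the next allocation will take the
    /// slow path and acquire fresh memory from the space.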
    pub(crate) fn reset(&mut self) {
        self.bump_pointer.reset(Address::ZERO, Address::ZERO);
        self.large_bump_pointer.reset(Address::ZERO, Address::ZERO);
        self.request_for_large = false;
        self.line = None;
    }
}

impl<VM: VMBinding> Allocator<VM> for ImmixAllocator<VM> {
    fn get_space(&self) -> &'static dyn Space<VM> {
        self.space as _
    }

    fn get_context(&self) -> &AllocatorContext<VM> {
        &self.context
    }

    fn does_thread_local_allocation(&self) -> bool {
        true
    }

    fn get_thread_local_buffer_granularity(&self) -> usize {
        crate::policy::immix::block::Block::BYTES
    }

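    /// Fast-path bump allocation. If the aligned request does not fit in the
    /// current thread-local buffer, requests whose maximum aligned size exceeds
    /// a line are redirected to overflow allocation, while smaller requests take
    /// the hot slow path, which tries recyclable lines first.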
    fn alloc(&mut self, size: usize, align: usize, offset: usize) -> Address {
        debug_assert!(
            size <= crate::policy::immix::MAX_IMMIX_OBJECT_SIZE,
            "Trying to allocate a {} bytes object, which is larger than MAX_IMMIX_OBJECT_SIZE {}",
            size,
            crate::policy::immix::MAX_IMMIX_OBJECT_SIZE
        );

        let result = align_allocation_no_fill::<VM>(self.bump_pointer.cursor, align, offset);
        let new_cursor = result + size;

        if new_cursor > self.bump_pointer.limit {
            trace!(
                "{:?}: Thread local buffer used up, go to alloc slow path",
                self.tls
            );
            if get_maximum_aligned_size::<VM>(size, align) > Line::BYTES {
                // Size larger than a line: take the overflow allocation path.
                self.overflow_alloc(size, align, offset)
            } else {
                // Size no larger than a line: try to fit into recyclable lines.
                self.alloc_slow_hot(size, align, offset)
            }
        } else {
            // Simple bump allocation.
            fill_alignment_gap::<VM>(self.bump_pointer.cursor, result);
            self.bump_pointer.cursor = new_cursor;
            trace!(
                "{:?}: Bump allocation size: {}, result: {}, new_cursor: {}, limit: {}",
                self.tls,
                size,
                result,
                self.bump_pointer.cursor,
                self.bump_pointer.limit
            );
            result
        }
    }

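    /// Out-of-line slow path: acquire a clean block from the Immix space and
    /// retry the allocation there.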
    fn alloc_slow_once(&mut self, size: usize, align: usize, offset: usize) -> Address {
        trace!("{:?}: alloc_slow_once", self.tls);
        self.acquire_clean_block(size, align, offset)
    }

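    /// Slow path for precise stress testing. When `need_poll` is true, a clean
    /// block is acquired so the space is polled, and the fake stress limit is
    /// re-installed before returning. Otherwise the real limits are restored,
    /// the allocation is retried (acquiring a clean block only if actually
    /// required), and the fake limit is set again afterwards.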
    fn alloc_slow_once_precise_stress(
        &mut self,
        size: usize,
        align: usize,
        offset: usize,
        need_poll: bool,
    ) -> Address {
        trace!("{:?}: alloc_slow_once_precise_stress", self.tls);
        if need_poll {
            trace!(
                "{:?}: alloc_slow_once_precise_stress going to poll",
                self.tls
            );
            let ret = self.acquire_clean_block(size, align, offset);
            self.set_limit_for_stress();
            trace!(
                "{:?}: alloc_slow_once_precise_stress done - forced stress poll",
                self.tls
            );
            return ret;
        }

        self.restore_limit_for_stress();
        let ret = if self.require_new_block(size, align, offset) {
            trace!(
                "{:?}: alloc_slow_once_precise_stress - acquire new block",
                self.tls
            );
            self.acquire_clean_block(size, align, offset)
        } else {
            trace!("{:?}: alloc_slow_once_precise_stress - alloc()", self.tls,);
            self.alloc(size, align, offset)
        };
        self.set_limit_for_stress();
        ret
    }

    fn get_tls(&self) -> VMThread {
        self.tls
    }
}

impl<VM: VMBinding> ImmixAllocator<VM> {
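    /// Create an Immix allocator for the given thread, bound to the given
    /// Immix space. `copy` indicates whether this allocator is used as a GC
    /// copy allocator rather than a mutator allocator.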
    pub(crate) fn new(
        tls: VMThread,
        space: Option<&'static dyn Space<VM>>,
        context: Arc<AllocatorContext<VM>>,
        copy: bool,
    ) -> Self {
        ImmixAllocator {
            tls,
            space: space.unwrap().downcast_ref::<ImmixSpace<VM>>().unwrap(),
            context,
            bump_pointer: BumpPointer::default(),
            hot: false,
            copy,
            large_bump_pointer: BumpPointer::default(),
            request_for_large: false,
            line: None,
        }
    }

    pub(crate) fn immix_space(&self) -> &'static ImmixSpace<VM> {
        self.space
    }

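    /// Allocate from the large (multi-line) bump pointer. If the request does
    /// not fit, take the generic slow path with `request_for_large` set so that
    /// the newly acquired block is installed as the large buffer.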
    fn overflow_alloc(&mut self, size: usize, align: usize, offset: usize) -> Address {
        trace!("{:?}: overflow_alloc", self.tls);
        let start = align_allocation_no_fill::<VM>(self.large_bump_pointer.cursor, align, offset);
        let end = start + size;
        if end > self.large_bump_pointer.limit {
            self.request_for_large = true;
            let rtn = self.alloc_slow_inline(size, align, offset);
            self.request_for_large = false;
            rtn
        } else {
            fill_alignment_gap::<VM>(self.large_bump_pointer.cursor, start);
            self.large_bump_pointer.cursor = end;
            start
        }
    }

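    /// Hot slow path: try to allocate from recyclable lines before falling back
    /// to the generic slow path. Under precise stress testing the generic slow
    /// path is taken even after lines were recycled, so that the stress-test
    /// accounting still observes this allocation.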
    fn alloc_slow_hot(&mut self, size: usize, align: usize, offset: usize) -> Address {
        trace!("{:?}: alloc_slow_hot", self.tls);
        if self.acquire_recyclable_lines(size, align, offset) {
            let stress_test = self.context.options.is_stress_test_gc_enabled();
            let precise_stress = *self.context.options.precise_stress;
            if unlikely(stress_test && precise_stress) {
                self.alloc_slow_inline(size, align, offset)
            } else {
                self.alloc(size, align, offset)
            }
        } else {
            self.alloc_slow_inline(size, align, offset)
        }
    }

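    /// Search the current reusable block (acquiring one if needed) for a
    /// contiguous run of free lines. On success, point the bump pointer at the
    /// run, zero its memory, remember where the next search should resume, and
    /// return true. Returns false when no recyclable lines are available.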
    fn acquire_recyclable_lines(&mut self, size: usize, align: usize, offset: usize) -> bool {
        while self.line.is_some() || self.acquire_recyclable_block() {
            let line = self.line.unwrap();
            if let Some((start_line, end_line)) = self.immix_space().get_next_available_lines(line)
            {
                self.bump_pointer.cursor = start_line.start();
                self.bump_pointer.limit = end_line.start();
                trace!(
                    "{:?}: acquire_recyclable_lines -> {:?} [{:?}, {:?}) {:?}",
                    self.tls,
                    self.line,
                    start_line,
                    end_line,
                    self.tls
                );
                crate::util::memory::zero(
                    self.bump_pointer.cursor,
                    self.bump_pointer.limit - self.bump_pointer.cursor,
                );
                debug_assert!(
                    align_allocation_no_fill::<VM>(self.bump_pointer.cursor, align, offset) + size
                        <= self.bump_pointer.limit
                );
                let block = line.block();
                self.line = if end_line == block.end_line() {
                    None
                } else {
                    Some(end_line)
                };
                if self.immix_space().should_allocate_as_live() {
                    let state = self.space.line_mark_state.load(Ordering::Acquire);
                    Line::eager_mark_lines::<VM>(state, start_line..end_line);
                }
                return true;
            } else {
                self.line = None;
            }
        }
        false
    }

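    /// Ask the Immix space for a reusable (partially free) block and start the
    /// line search from its first line. Returns false if no such block exists.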
    fn acquire_recyclable_block(&mut self) -> bool {
        match self.immix_space().get_reusable_block(self.copy) {
            Some(block) => {
                trace!("{:?}: acquire_recyclable_block -> {:?}", self.tls, block);
                self.line = Some(block.start_line());
                true
            }
            _ => false,
        }
    }

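    /// Acquire a clean block from the Immix space and retry the allocation in
    /// it. The block feeds either the normal or the large bump pointer,
    /// depending on `request_for_large`. Returns `Address::ZERO` if the space
    /// cannot provide a block (e.g. a GC has been triggered).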
    fn acquire_clean_block(&mut self, size: usize, align: usize, offset: usize) -> Address {
        match self.immix_space().get_clean_block(
            self.tls,
            self.copy,
            self.get_context().get_alloc_options(),
        ) {
            None => Address::ZERO,
            Some(block) => {
                trace!(
                    "{:?}: Acquired a new block {:?} -> {:?}",
                    self.tls,
                    block.start(),
                    block.end()
                );
                Line::MARK_TABLE
                    .bzero_metadata(block.start(), crate::policy::immix::block::Block::BYTES);
                if self.immix_space().should_allocate_as_live() {
                    let state = self.space.line_mark_state.load(Ordering::Acquire);
                    Line::eager_mark_lines::<VM>(state, block.start_line()..block.end_line());
                }
                if self.request_for_large {
                    self.large_bump_pointer.cursor = block.start();
                    self.large_bump_pointer.limit = block.end();
                } else {
                    self.bump_pointer.cursor = block.start();
                    self.bump_pointer.limit = block.end();
                }
                self.alloc(size, align, offset)
            }
        }
    }

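    /// Used by the precise-stress slow path to decide whether this allocation
    /// requires a new block: a request larger than a line that fits neither
    /// bump buffer, or a smaller request that does not fit the normal buffer
    /// and cannot be served from recyclable lines.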
    fn require_new_block(&mut self, size: usize, align: usize, offset: usize) -> bool {
        let result = align_allocation_no_fill::<VM>(self.bump_pointer.cursor, align, offset);
        let new_cursor = result + size;
        let insufficient_space = new_cursor > self.bump_pointer.limit;

        if insufficient_space && get_maximum_aligned_size::<VM>(size, align) > Line::BYTES {
            let start =
                align_allocation_no_fill::<VM>(self.large_bump_pointer.cursor, align, offset);
            let end = start + size;
            end > self.large_bump_pointer.limit
        } else {
            insufficient_space && !self.acquire_recyclable_lines(size, align, offset)
        }
    }

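    /// Install the fake limits used for precise stress testing: each limit is
    /// overwritten with the number of bytes remaining in its buffer, a value
    /// well below the cursor, so every fast-path allocation appears to overflow
    /// and goes to the slow path. For example, cursor 0x10000 with limit
    /// 0x10800 becomes limit 0x800. `restore_limit_for_stress` reverses this.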
    fn set_limit_for_stress(&mut self) {
        if self.bump_pointer.cursor < self.bump_pointer.limit {
            let old_limit = self.bump_pointer.limit;
            let new_limit =
                unsafe { Address::from_usize(self.bump_pointer.limit - self.bump_pointer.cursor) };
            self.bump_pointer.limit = new_limit;
            trace!(
                "{:?}: set_limit_for_stress. normal c {} l {} -> {}",
                self.tls,
                self.bump_pointer.cursor,
                old_limit,
                new_limit,
            );
        }

        if self.large_bump_pointer.cursor < self.large_bump_pointer.limit {
            let old_lg_limit = self.large_bump_pointer.limit;
            let new_lg_limit = unsafe {
                Address::from_usize(self.large_bump_pointer.limit - self.large_bump_pointer.cursor)
            };
            self.large_bump_pointer.limit = new_lg_limit;
            trace!(
                "{:?}: set_limit_for_stress. large c {} l {} -> {}",
                self.tls,
                self.large_bump_pointer.cursor,
                old_lg_limit,
                new_lg_limit,
            );
        }
    }

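    /// Undo `set_limit_for_stress`: when a limit is below its cursor it holds
    /// the remaining buffer size, so the real limit is recovered as
    /// `cursor + remaining`.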
    fn restore_limit_for_stress(&mut self) {
        if self.bump_pointer.limit < self.bump_pointer.cursor {
            let old_limit = self.bump_pointer.limit;
            let new_limit = self.bump_pointer.cursor + self.bump_pointer.limit.as_usize();
            self.bump_pointer.limit = new_limit;
            trace!(
                "{:?}: restore_limit_for_stress. normal c {} l {} -> {}",
                self.tls,
                self.bump_pointer.cursor,
                old_limit,
                new_limit,
            );
        }

        if self.large_bump_pointer.limit < self.large_bump_pointer.cursor {
            let old_lg_limit = self.large_bump_pointer.limit;
            let new_lg_limit =
                self.large_bump_pointer.cursor + self.large_bump_pointer.limit.as_usize();
            self.large_bump_pointer.limit = new_lg_limit;
            trace!(
                "{:?}: restore_limit_for_stress. large c {} l {} -> {}",
                self.tls,
                self.large_bump_pointer.cursor,
                old_lg_limit,
                new_lg_limit,
            );
        }
    }
}