mmtk/util/alloc/allocator.rs
use crate::global_state::GlobalState;
use crate::util::address::Address;
#[cfg(feature = "analysis")]
use crate::util::analysis::AnalysisManager;
use crate::util::heap::gc_trigger::GCTrigger;
use crate::util::options::Options;
use crate::MMTK;

use std::cell::RefCell;
use std::sync::atomic::Ordering;
use std::sync::Arc;

use crate::policy::space::Space;
use crate::util::constants::*;
use crate::util::opaque_pointer::*;
use crate::vm::VMBinding;
use crate::vm::{ActivePlan, Collection};
use downcast_rs::Downcast;

#[repr(C)]
#[derive(Debug)]
/// A list of errors that MMTk can encounter during allocation.
pub enum AllocationError {
    /// The specified heap size is too small for the given program to continue.
    HeapOutOfMemory,
    /// The OS is unable to mmap or acquire more memory. Critical error. MMTk expects the VM to
    /// abort if such an error is thrown.
    MmapOutOfMemory,
}

/// Allow specifying different behaviors with [`Allocator::alloc_with_options`].
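///
/// A minimal sketch of constructing an options value (not compiled here; any subset of fields can
/// be overridden while keeping the defaults for the rest):
///
/// ```ignore
/// // Allow this request to exceed the current heap size, keeping the other defaults.
/// let opts = AllocationOptions {
///     allow_overcommit: true,
///     ..Default::default()
/// };
/// ```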
#[repr(C)]
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub struct AllocationOptions {
    /// Whether over-committing is allowed at this allocation site. Over-committing means the
    /// allocation is allowed to go beyond the current heap size. But it is not guaranteed to
    /// succeed.
    ///
    /// **The default is `false`**.
    ///
    /// Note that regardless of the value of `allow_overcommit`, the allocation may trigger GC if
    /// the GC trigger considers it needed.
    pub allow_overcommit: bool,

    /// Whether the allocation is at a safepoint.
    ///
    /// **The default is `true`**.
    ///
    /// If `true`, the allocation is allowed to block for GC.
    ///
    /// If `false`, the allocation will immediately return a null address if the allocation cannot
    /// be satisfied without a GC.
    pub at_safepoint: bool,

    /// Whether the allocation is allowed to call [`Collection::out_of_memory`].
    ///
    /// **The default is `true`**.
    ///
    /// If `true`, the allocation will call [`Collection::out_of_memory`] when out of memory and
    /// return null.
    ///
    /// If `false`, the allocation will return null immediately when out of memory.
    pub allow_oom_call: bool,
}

/// The default value for `AllocationOptions` has the same semantics as calling [`Allocator::alloc`]
/// directly.
impl Default for AllocationOptions {
    fn default() -> Self {
        Self {
            allow_overcommit: false,
            at_safepoint: true,
            allow_oom_call: true,
        }
    }
}

impl AllocationOptions {
    pub(crate) fn is_default(&self) -> bool {
        *self == AllocationOptions::default()
    }
}

84/// A wrapper for [`AllocatorContext`] to hold a [`AllocationOptions`] that can be modified by the
85/// same mutator thread.
86///
87/// All [`Allocator`] instances in `Allocators` share one `AllocationOptions` instance, and it will
88/// only be accessed by the mutator (via `Mutator::allocators`) or the GC worker (via
89/// `GCWorker::copy`) that owns it. Rust doesn't like multiple mutable references pointing to a
90/// shared data structure. We cannot use [`atomic::Atomic`] because `AllocationOptions` has
91/// multiple fields. We wrap it in a `RefCell` to make it internally mutable.
92///
93/// Note: The allocation option is called every time [`Allocator::alloc_with_options`] is called.
94/// Because API functions should only be called on allocation slow paths, we believe that `RefCell`
95/// should be good enough for performance. If this is too slow, we may consider `UnsafeCell`. If
96/// that's still too slow, we should consider changing the API to make the allocation options a
97/// persistent per-mutator value, and allow the VM binding set its value via a new API function.
struct AllocationOptionsHolder {
    alloc_options: RefCell<AllocationOptions>,
}

/// Strictly speaking, `AllocationOptionsHolder` isn't `Sync`: two threads cannot set or clear the
/// same `AllocationOptionsHolder` at the same time. However, both `Mutator` and `GCWorker` are
/// `Send`, both of them own `Allocators`, and both require its field `Arc<AllocatorContext>` to be
/// `Send`, which requires `AllocatorContext` to be `Sync`, which requires
/// `AllocationOptionsHolder` to be `Sync`. (Note that `Arc<T>` can be cloned and given to another
/// thread, and Rust therefore expects `T` to be `Sync`, too. But we never share `AllocatorContext`
/// between threads, only between multiple `Allocator` instances within the same `Allocators`
/// instance. Rust can't figure this out.)
unsafe impl Sync for AllocationOptionsHolder {}

impl AllocationOptionsHolder {
    pub fn new(alloc_options: AllocationOptions) -> Self {
        Self {
            alloc_options: RefCell::new(alloc_options),
        }
    }

    pub fn set_alloc_options(&self, options: AllocationOptions) {
        let mut alloc_options = self.alloc_options.borrow_mut();
        *alloc_options = options;
    }

    pub fn clear_alloc_options(&self) {
        let mut alloc_options = self.alloc_options.borrow_mut();
        *alloc_options = AllocationOptions::default();
    }

    pub fn get_alloc_options(&self) -> AllocationOptions {
        let alloc_options = self.alloc_options.borrow();
        *alloc_options
    }
}

pub fn align_allocation_no_fill<VM: VMBinding>(
    region: Address,
    alignment: usize,
    offset: usize,
) -> Address {
    align_allocation_inner::<VM>(region, alignment, offset, VM::MIN_ALIGNMENT, false)
}

pub fn align_allocation<VM: VMBinding>(
    region: Address,
    alignment: usize,
    offset: usize,
) -> Address {
    align_allocation_inner::<VM>(region, alignment, offset, VM::MIN_ALIGNMENT, true)
}

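/// Align `region` so that `region + offset` is aligned to `alignment`, optionally filling the
/// alignment gap with the VM's alignment value (this description follows the computation below).
///
/// A worked example with illustrative numbers only:
///
/// ```ignore
/// // alignment = 16, offset = 0, region = 0x1008:
/// //   delta = (-0 - 0x1008) & 0xF = 8
/// // so the returned address is 0x1010, which is 16-byte aligned.
/// let result = align_allocation_inner::<VM>(region, 16, 0, VM::MIN_ALIGNMENT, false);
/// ```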
pub fn align_allocation_inner<VM: VMBinding>(
    region: Address,
    alignment: usize,
    offset: usize,
    known_alignment: usize,
    fillalignmentgap: bool,
) -> Address {
    debug_assert!(known_alignment >= VM::MIN_ALIGNMENT);
    // Make sure MIN_ALIGNMENT is reasonable.
    #[allow(clippy::assertions_on_constants)]
    {
        debug_assert!(VM::MIN_ALIGNMENT >= BYTES_IN_INT);
    }
    debug_assert!(!(fillalignmentgap && region.is_zero()));
    debug_assert!(alignment <= VM::MAX_ALIGNMENT);
    debug_assert!(region.is_aligned_to(VM::ALLOC_END_ALIGNMENT));
    debug_assert!((alignment & (VM::MIN_ALIGNMENT - 1)) == 0);
    debug_assert!((offset & (VM::MIN_ALIGNMENT - 1)) == 0);

    // No alignment ever required.
    if alignment <= known_alignment || VM::MAX_ALIGNMENT <= VM::MIN_ALIGNMENT {
        return region;
    }

    // May require an alignment
    let mask = (alignment - 1) as isize; // fromIntSignExtend
    let neg_off: isize = -(offset as isize); // fromIntSignExtend
    let delta = neg_off.wrapping_sub_unsigned(region.as_usize()) & mask; // Use wrapping_sub to avoid overflow

    if fillalignmentgap && (VM::ALIGNMENT_VALUE != 0) {
        fill_alignment_gap::<VM>(region, region + delta);
    }

    region + delta
}

/// Fill the specified region with the alignment value.
pub fn fill_alignment_gap<VM: VMBinding>(start: Address, end: Address) {
    if VM::ALIGNMENT_VALUE != 0 {
        let start_ptr = start.to_mut_ptr::<u8>();
        unsafe {
            std::ptr::write_bytes(start_ptr, VM::ALIGNMENT_VALUE, end - start);
        }
    }
}

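/// Return a conservative upper bound of the bytes needed to satisfy an allocation of `size` bytes
/// at the given `alignment`, assuming the underlying region is only guaranteed to be aligned to
/// `VM::MIN_ALIGNMENT` (this description follows the computation below). For example, with
/// `size = 24`, `alignment = 16` and `VM::MIN_ALIGNMENT = 8`, the worst-case padding is
/// `16 - 8 = 8` bytes, so the result is `24 + 8 = 32` bytes.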
pub fn get_maximum_aligned_size<VM: VMBinding>(size: usize, alignment: usize) -> usize {
    get_maximum_aligned_size_inner::<VM>(size, alignment, VM::MIN_ALIGNMENT)
}

pub fn get_maximum_aligned_size_inner<VM: VMBinding>(
    size: usize,
    alignment: usize,
    known_alignment: usize,
) -> usize {
    trace!(
        "size={}, alignment={}, known_alignment={}, MIN_ALIGNMENT={}",
        size,
        alignment,
        known_alignment,
        VM::MIN_ALIGNMENT
    );
    debug_assert!(size == size & !(known_alignment - 1));
    debug_assert!(known_alignment >= VM::MIN_ALIGNMENT);

    if VM::MAX_ALIGNMENT <= VM::MIN_ALIGNMENT || alignment <= known_alignment {
        size
    } else {
        size + alignment - known_alignment
    }
}

#[cfg(debug_assertions)]
pub(crate) fn assert_allocation_args<VM: VMBinding>(size: usize, align: usize, offset: usize) {
    // MMTk has assumptions about minimal object size.
    // We need to make sure that all allocations comply with the min object size.
    // Ideally, we check the allocation size, and if it is smaller, we transparently allocate the min
    // object size (the VM does not need to know this). However, for the VM bindings we support at the moment,
    // their object sizes are all larger than MMTk's min object size, so we simply put an assertion here.
    // If you plan to use MMTk with a VM with its object size smaller than MMTk's min object size, you should
    // meet the min object size in the fastpath.
    debug_assert!(size >= MIN_OBJECT_SIZE);
    // Assert alignment
    debug_assert!(align >= VM::MIN_ALIGNMENT);
    debug_assert!(align <= VM::MAX_ALIGNMENT);
    // Assert offset
    debug_assert!(VM::USE_ALLOCATION_OFFSET || offset == 0);
}

/// The context an allocator needs to access in order to perform allocation.
pub struct AllocatorContext<VM: VMBinding> {
    alloc_options: AllocationOptionsHolder,
    pub state: Arc<GlobalState>,
    pub options: Arc<Options>,
    pub gc_trigger: Arc<GCTrigger<VM>>,
    #[cfg(feature = "analysis")]
    pub analysis_manager: Arc<AnalysisManager<VM>>,
}

impl<VM: VMBinding> AllocatorContext<VM> {
    pub fn new(mmtk: &MMTK<VM>) -> Self {
        Self {
            alloc_options: AllocationOptionsHolder::new(AllocationOptions::default()),
            state: mmtk.state.clone(),
            options: mmtk.options.clone(),
            gc_trigger: mmtk.gc_trigger.clone(),
            #[cfg(feature = "analysis")]
            analysis_manager: mmtk.analysis_manager.clone(),
        }
    }

    pub fn set_alloc_options(&self, options: AllocationOptions) {
        self.alloc_options.set_alloc_options(options);
    }

    pub fn clear_alloc_options(&self) {
        self.alloc_options.clear_alloc_options();
    }

    pub fn get_alloc_options(&self) -> AllocationOptions {
        self.alloc_options.get_alloc_options()
    }
}

/// A trait which implements allocation routines. Every allocator needs to implement this trait.
pub trait Allocator<VM: VMBinding>: Downcast {
    /// Return the [`VMThread`] associated with this allocator instance.
    fn get_tls(&self) -> VMThread;

    /// Return the [`Space`](crate::policy::space::Space) instance associated with this allocator instance.
    fn get_space(&self) -> &'static dyn Space<VM>;

    /// Return the context for the allocator.
    fn get_context(&self) -> &AllocatorContext<VM>;

    /// Return whether this allocator can do thread local allocation. If an allocator does not do
    /// thread local allocation, each allocation will go to the slowpath and will have a check for
    /// GC polls.
    fn does_thread_local_allocation(&self) -> bool;

    /// Return the granularity at which the allocator acquires memory from the global space and
    /// uses it as a thread local buffer. For example, the
    /// [`BumpAllocator`](crate::util::alloc::BumpAllocator) acquires memory in 32KB blocks.
    /// Depending on the size of the current object, it always acquires memory in multiples of
    /// 32KB (N * 32KB, N >= 1). Thus the [`BumpAllocator`](crate::util::alloc::BumpAllocator)
    /// returns 32KB for this method. Only allocators that do thread local allocation need to
    /// implement this method.
    fn get_thread_local_buffer_granularity(&self) -> usize {
        assert!(
            self.does_thread_local_allocation(),
            "An allocator that does not do thread local allocation does not have a buffer granularity."
        );
        unimplemented!()
    }

    /// An allocation attempt. The implementation of this function depends on the allocator used.
    /// If an allocator supports thread local allocations, then the allocation will be serviced
    /// from its TLAB, otherwise it will default to using the slowpath, i.e. [`alloc_slow`](Allocator::alloc_slow).
    ///
    /// If the heap is full, we trigger a GC and attempt to free up more memory, then re-attempt
    /// the allocation.
    ///
    /// Note that in the case where the VM is out of memory, we invoke
    /// [`Collection::out_of_memory`] to inform the binding and then return a null pointer back to
    /// it. We have no assumptions on whether the VM will continue executing or abort immediately.
    /// If the VM continues execution, the function will return a null address.
    ///
    /// An allocator needs to make sure the object reference for the returned address is in the same
    /// chunk as the returned address (so the side metadata and the SFT for an object reference is valid).
    /// See [`crate::util::alloc::object_ref_guard`](util/alloc/object_ref_guard).
    ///
    /// Arguments:
    /// * `size`: the allocation size in bytes.
    /// * `align`: the required alignment in bytes.
    /// * `offset`: the required offset in bytes.
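    ///
    /// A minimal usage sketch (not compiled; `allocator` is assumed to be an `Allocator` obtained
    /// elsewhere, e.g. from a mutator):
    ///
    /// ```ignore
    /// let addr = allocator.alloc(24, 8, 0);
    /// if addr.is_zero() {
    ///     // Out of memory: `Collection::out_of_memory` has already been called, and it is up to
    ///     // the VM whether to unwind, abort, or continue with a null result.
    /// }
    /// ```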
    fn alloc(&mut self, size: usize, align: usize, offset: usize) -> Address;

    /// An allocation attempt. The allocation options may specify different behaviors for this
    /// allocation request.
    ///
    /// Arguments:
    /// * `size`: the allocation size in bytes.
    /// * `align`: the required alignment in bytes.
    /// * `offset`: the required offset in bytes.
    /// * `alloc_options`: the allocation options to change the default allocation behavior for
    ///   this request.
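    ///
    /// A minimal usage sketch (not compiled; `allocator` is assumed to be an `Allocator` obtained
    /// elsewhere, e.g. from a mutator):
    ///
    /// ```ignore
    /// // Do not block for GC at this allocation site; a request that cannot be satisfied
    /// // without a GC returns Address::ZERO instead.
    /// let addr = allocator.alloc_with_options(
    ///     24,
    ///     8,
    ///     0,
    ///     AllocationOptions { at_safepoint: false, ..Default::default() },
    /// );
    /// ```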
    fn alloc_with_options(
        &mut self,
        size: usize,
        align: usize,
        offset: usize,
        alloc_options: AllocationOptions,
    ) -> Address {
        self.get_context().set_alloc_options(alloc_options);
        let ret = self.alloc(size, align, offset);
        self.get_context().clear_alloc_options();
        ret
    }

    /// Slowpath allocation attempt. This function is explicitly not inlined for performance
    /// considerations.
    ///
    /// Arguments:
    /// * `size`: the allocation size in bytes.
    /// * `align`: the required alignment in bytes.
    /// * `offset`: the required offset in bytes.
    #[inline(never)]
    fn alloc_slow(&mut self, size: usize, align: usize, offset: usize) -> Address {
        self.alloc_slow_inline(size, align, offset)
    }

    /// Slowpath allocation attempt. Mostly the same as [`Allocator::alloc_slow`], except that the
    /// allocation options may specify different behaviors for this allocation request.
    ///
    /// This function is not used internally. It is mostly for the bindings.
    /// [`Allocator::alloc_with_options`] still calls the normal [`Allocator::alloc_slow`].
    ///
    /// Arguments:
    /// * `size`: the allocation size in bytes.
    /// * `align`: the required alignment in bytes.
    /// * `offset`: the required offset in bytes.
    /// * `alloc_options`: the allocation options to change the default allocation behavior for
    ///   this request.
    fn alloc_slow_with_options(
        &mut self,
        size: usize,
        align: usize,
        offset: usize,
        alloc_options: AllocationOptions,
    ) -> Address {
        // This function is not used internally, so we won't end up setting the allocation
        // options redundantly.
        self.get_context().set_alloc_options(alloc_options);
        let ret = self.alloc_slow(size, align, offset);
        self.get_context().clear_alloc_options();
        ret
    }

    /// Slowpath allocation attempt. This function executes the actual slowpath allocation. A
    /// slowpath allocation in MMTk attempts to allocate the object using the per-allocator
    /// definition of [`alloc_slow_once`](Allocator::alloc_slow_once). This function also accounts for increasing the
    /// allocation bytes in order to support stress testing. In case precise stress testing is
    /// being used, the [`alloc_slow_once_precise_stress`](Allocator::alloc_slow_once_precise_stress) function is used instead.
    ///
    /// Note that in the case where the VM is out of memory, we invoke
    /// [`Collection::out_of_memory`] with an [`AllocationError::HeapOutOfMemory`] error to inform
    /// the binding and then return a null pointer back to it. We have no assumptions on whether
    /// the VM will continue executing or abort immediately on a
    /// [`AllocationError::HeapOutOfMemory`] error.
    ///
    /// Arguments:
    /// * `size`: the allocation size in bytes.
    /// * `align`: the required alignment in bytes.
    /// * `offset`: the required offset in bytes.
    fn alloc_slow_inline(&mut self, size: usize, align: usize, offset: usize) -> Address {
        let tls = self.get_tls();
        let is_mutator = VM::VMActivePlan::is_mutator(tls);
        let stress_test = self.get_context().options.is_stress_test_gc_enabled();

        // Information about the previous collection.
        let mut emergency_collection = false;
        let mut previous_result_zero = false;

        loop {
            // Try to allocate using the slow path
            let result = if is_mutator && stress_test && *self.get_context().options.precise_stress
            {
                // If we are doing precise stress GC, we invoke the special
                // alloc_slow_once_precise_stress() call. It should make sure that every
                // allocation goes to the slowpath (here) so we can check the allocation bytes
                // and decide if we need to do a stress GC.

                // If we should do a stress GC now, we tell alloc_slow_once_precise_stress(),
                // so it avoids trying any thread local allocation, and directly calls the
                // global acquire and does a poll.
                let need_poll = is_mutator && self.get_context().gc_trigger.should_do_stress_gc();
                self.alloc_slow_once_precise_stress(size, align, offset, need_poll)
            } else {
                // If we are not doing precise stress GC, just call the normal alloc_slow_once().
                // Normal stress test only checks for stress GC in the slowpath.
                self.alloc_slow_once_traced(size, align, offset)
            };

            if !is_mutator {
                debug_assert!(!result.is_zero());
                return result;
            }

            if !result.is_zero() {
                // Report allocation success to assist OutOfMemory handling.
                if !self
                    .get_context()
                    .state
                    .allocation_success
                    .load(Ordering::Relaxed)
                {
                    self.get_context()
                        .state
                        .allocation_success
                        .store(true, Ordering::SeqCst);
                }

                // Only update the allocation bytes if we haven't failed a previous allocation in this loop
                if stress_test && self.get_context().state.is_initialized() && !previous_result_zero
                {
                    let allocated_size = if *self.get_context().options.precise_stress
                        || !self.does_thread_local_allocation()
                    {
                        // For precise stress test, or for allocators that do not have thread local buffer,
                        // we know exactly how many bytes we allocate.
                        size
                    } else {
                        // For normal stress test, we count the entire thread local buffer size as allocated.
                        crate::util::conversions::raw_align_up(
                            size,
                            self.get_thread_local_buffer_granularity(),
                        )
                    };
                    let _allocation_bytes = self
                        .get_context()
                        .state
                        .increase_allocation_bytes_by(allocated_size);

                    // This is the allocation hook for the analysis trait. If you want to call
                    // an analysis counter specific allocation hook, then here is the place to do so
                    #[cfg(feature = "analysis")]
                    if _allocation_bytes > *self.get_context().options.analysis_factor {
                        trace!(
                            "Analysis: allocation_bytes = {} more than analysis_factor = {}",
                            _allocation_bytes,
                            *self.get_context().options.analysis_factor
                        );

                        self.get_context()
                            .analysis_manager
                            .alloc_hook(size, align, offset);
                    }
                }

                return result;
            }

            // From here on, we handle the case that the slowpath allocation failed.
            assert!(result.is_zero());

            if !self.get_context().get_alloc_options().at_safepoint {
                // If the allocation is not at a safepoint, it will not be able to block for GC.
                // But the code beyond this point tests OOM conditions and, if not OOM, tries to
                // allocate again. Since we didn't block for GC, the allocation will fail again if
                // we try again. So we return null immediately.
                return Address::ZERO;
            }

            // It is possible for a thread to be blocked for another GC (non-emergency)
            // immediately after being blocked for a GC (emergency) (e.g. in a stress test), that
            // is, the thread does not leave this loop between the two GCs. The local var
            // 'emergency_collection' was set to true after the first GC. But when we execute this
            // check below, we just finished the second GC, which is not emergency. In such a case,
            // we would give a false OOM. We cannot just rely on the local var. Instead, we get
            // the emergency collection value again, and check both.
            if emergency_collection && self.get_context().state.is_emergency_collection() {
                trace!("Emergency collection");
                // Report allocation success to assist OutOfMemory handling.
                // This seems odd, but we must allow each OOM to run its course (and maybe give us back memory)
                let fail_with_oom = !self
                    .get_context()
                    .state
                    .allocation_success
                    .swap(true, Ordering::SeqCst);
                trace!("fail with oom={}", fail_with_oom);
                if fail_with_oom {
                    // Note that we throw a `HeapOutOfMemory` error here and return a null ptr back to the VM
                    trace!("Throw HeapOutOfMemory!");
                    VM::VMCollection::out_of_memory(tls, AllocationError::HeapOutOfMemory);
                    self.get_context()
                        .state
                        .allocation_success
                        .store(false, Ordering::SeqCst);
                    return result;
                }
            }

            /* This is in case a GC occurs, and our mutator context is stale.
             * In some VMs the scheduler can change the affinity between the
             * current thread and the mutator context. This is possible for
             * VMs that dynamically multiplex Java threads onto multiple mutator
             * contexts. */
            // FIXME: No good way to do this
            //current = unsafe {
            //    VMActivePlan::mutator(tls).get_allocator_from_space(space)
            //};

            // Record whether the last collection was an emergency collection. If so, we make one
            // more attempt to allocate before we signal an OOM.
            emergency_collection = self.get_context().state.is_emergency_collection();
            trace!("Got emergency collection as {}", emergency_collection);
            previous_result_zero = true;
        }
    }

    /// Single slow path allocation attempt. This is called by [`alloc_slow_inline`](Allocator::alloc_slow_inline). The
    /// implementation of this function depends on the allocator used. Generally, if an allocator
    /// supports thread local allocations, it will try to allocate more TLAB space here. If it
    /// doesn't, then (generally) the allocator simply allocates enough space for the current
    /// object.
    ///
    /// Arguments:
    /// * `size`: the allocation size in bytes.
    /// * `align`: the required alignment in bytes.
    /// * `offset`: the required offset in bytes.
    fn alloc_slow_once(&mut self, size: usize, align: usize, offset: usize) -> Address;

    /// A wrapper method for [`alloc_slow_once`](Allocator::alloc_slow_once) to insert USDT tracepoints.
    ///
    /// Arguments:
    /// * `size`: the allocation size in bytes.
    /// * `align`: the required alignment in bytes.
    /// * `offset`: the required offset in bytes.
    fn alloc_slow_once_traced(&mut self, size: usize, align: usize, offset: usize) -> Address {
        probe!(mmtk, alloc_slow_once_start);
        // probe! expands to an empty block on unsupported platforms
        #[allow(clippy::let_and_return)]
        let ret = self.alloc_slow_once(size, align, offset);
        probe!(mmtk, alloc_slow_once_end);
        ret
    }

    /// Single slowpath allocation attempt for stress test. When the stress factor is set (e.g. to
    /// N), we expect a stress GC to be triggered for every N bytes allocated. However, allocators
    /// that do thread local allocation may allocate from their thread local buffer (which does not
    /// have a GC poll check), and they may even allocate with a JIT-generated allocation fastpath
    /// that is unaware of stress test GC. In both cases, we are not able to guarantee that a
    /// stress GC is triggered every N bytes. To solve this, when the stress factor is set, we call
    /// this method instead of the normal alloc_slow_once(). We expect the implementation of this
    /// slow allocation to trick the fastpath so every allocation fails in the fastpath, jumps to
    /// the slow path and eventually calls this method again for the actual allocation.
    ///
    /// The actual implementation of how to trick the fastpath may vary. For example, our bump
    /// pointer allocator will set the thread local buffer limit to the buffer size instead of the
    /// buffer end address. In this case, every fastpath check (cursor + size < limit) will fail,
    /// and jump to this slowpath. In the slowpath, we still allocate from the thread local buffer,
    /// and recompute the limit (remaining buffer size).
    ///
    /// If an allocator does not do thread local allocation (which returns false for
    /// does_thread_local_allocation()), it does not need to override this method. The default
    /// implementation will simply call alloc_slow_once() and it will work fine for allocators that
    /// do not have thread local allocation.
    ///
    /// Arguments:
    /// * `size`: the allocation size in bytes.
    /// * `align`: the required alignment in bytes.
    /// * `offset`: the required offset in bytes.
    /// * `need_poll`: if this is true, the implementation must poll for a GC, rather than
    ///   attempting to allocate from the local buffer.
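    ///
    /// An illustrative sketch of the bump-pointer fastpath trick described above (not the actual
    /// implementation; `cursor` and `limit` are assumed fields of such an allocator):
    ///
    /// ```ignore
    /// // With `limit` deliberately set to the buffer *size* rather than the buffer end address,
    /// // the check below fails for every allocation, so we always take the slowpath.
    /// if cursor + size >= limit {
    ///     return self.alloc_slow(size, align, offset);
    /// }
    /// // (Unreachable in stress mode) bump `cursor` and return.
    /// ```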
    fn alloc_slow_once_precise_stress(
        &mut self,
        size: usize,
        align: usize,
        offset: usize,
        need_poll: bool,
    ) -> Address {
        // If an allocator does thread local allocation but does not override this method to
        // provide a correct implementation, we will log a warning.
        if self.does_thread_local_allocation() && need_poll {
            warn!("{} does not support stress GC (an allocator that does thread local allocation needs to implement alloc_slow_once_precise_stress()).", std::any::type_name::<Self>());
        }
        self.alloc_slow_once_traced(size, align, offset)
    }

    /// The [`crate::plan::Mutator`] that includes this allocator is going to be destroyed. Some allocators
    /// may need to save/transfer their thread local data to the space.
    fn on_mutator_destroy(&mut self) {
        // By default, do nothing
    }
}

impl_downcast!(Allocator<VM> where VM: VMBinding);