mmtk/util/alloc/allocator.rs
use crate::global_state::GlobalState;
use crate::util::address::Address;
#[cfg(feature = "analysis")]
use crate::util::analysis::AnalysisManager;
use crate::util::heap::gc_trigger::GCTrigger;
use crate::util::options::Options;
use crate::MMTK;

use std::cell::RefCell;
use std::sync::atomic::Ordering;
use std::sync::Arc;

use crate::policy::space::Space;
use crate::util::opaque_pointer::*;
use crate::vm::VMBinding;
use crate::vm::{ActivePlan, Collection};
use downcast_rs::Downcast;

#[repr(C)]
#[derive(Debug)]
/// A list of errors that MMTk can encounter during allocation.
pub enum AllocationError {
    /// The specified heap size is too small for the given program to continue.
    HeapOutOfMemory,
    /// The OS is unable to mmap or acquire more memory. Critical error. MMTk expects the VM to
    /// abort if such an error is thrown.
    MmapOutOfMemory,
}

/// Allow specifying different behaviors with [`Allocator::alloc_with_options`].
#[repr(C)]
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub struct AllocationOptions {
    /// Whether over-committing is allowed at this allocation site. Over-committing means the
    /// allocation is allowed to go beyond the current heap size, but it is not guaranteed to
    /// succeed.
    ///
    /// **The default is `false`**.
    ///
    /// Note that regardless of the value of `allow_overcommit`, the allocation may trigger GC if
    /// the GC trigger considers it necessary.
    pub allow_overcommit: bool,

    /// Whether the allocation is at a safepoint.
    ///
    /// **The default is `true`**.
    ///
    /// If `true`, the allocation is allowed to block for GC.
    ///
    /// If `false`, the allocation will immediately return a null address if the allocation cannot
    /// be satisfied without a GC.
    pub at_safepoint: bool,

    /// Whether the allocation is allowed to call [`Collection::out_of_memory`].
    ///
    /// **The default is `true`**.
    ///
    /// If `true`, the allocation will call [`Collection::out_of_memory`] when out of memory and
    /// return null.
    ///
    /// If `false`, the allocation will return null immediately when out of memory.
    pub allow_oom_call: bool,
}

/// The default value for `AllocationOptions` has the same semantics as calling [`Allocator::alloc`]
/// directly.
impl Default for AllocationOptions {
    fn default() -> Self {
        Self {
            allow_overcommit: false,
            at_safepoint: true,
            allow_oom_call: true,
        }
    }
}

impl AllocationOptions {
    pub(crate) fn is_default(&self) -> bool {
        *self == AllocationOptions::default()
    }
}
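
// Illustrative usage (a minimal sketch, not part of this file): a VM binding that must not block
// for GC at a particular allocation site can override only the fields it cares about and rely on
// `Default` for the rest, assuming it holds a `&mut dyn Allocator<VM>`:
//
//     let addr = allocator.alloc_with_options(
//         size,
//         align,
//         0,
//         AllocationOptions {
//             at_safepoint: false,
//             ..Default::default()
//         },
//     );
//     if addr.is_zero() {
//         // The request could not be satisfied without blocking for GC; retry at a safepoint.
//     }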

/// A wrapper for [`AllocatorContext`] to hold an [`AllocationOptions`] that can be modified by
/// the mutator thread that owns it.
///
/// All [`Allocator`] instances in `Allocators` share one `AllocationOptions` instance, and it will
/// only be accessed by the mutator (via `Mutator::allocators`) or the GC worker (via
/// `GCWorker::copy`) that owns it. Rust does not allow multiple mutable references pointing to a
/// shared data structure. We cannot use [`atomic::Atomic`] because `AllocationOptions` has
/// multiple fields. We wrap it in a `RefCell` to make it internally mutable.
///
/// Note: The allocation options are set and cleared every time [`Allocator::alloc_with_options`]
/// is called. Because API functions should only be called on allocation slow paths, we believe
/// that `RefCell` should be good enough for performance. If this is too slow, we may consider
/// `UnsafeCell`. If that's still too slow, we should consider changing the API to make the
/// allocation options a persistent per-mutator value, and allow the VM binding to set its value
/// via a new API function.
struct AllocationOptionsHolder {
    alloc_options: RefCell<AllocationOptions>,
}

/// Strictly speaking, `AllocationOptionsHolder` isn't `Sync`: two threads must not set or clear
/// the same `AllocationOptionsHolder` at the same time. However, both `Mutator` and `GCWorker`
/// are `Send`, and both of them own `Allocators` and require its field `Arc<AllocatorContext>` to
/// be `Send`, which requires `AllocatorContext` to be `Sync`, which in turn requires
/// `AllocationOptionsHolder` to be `Sync`. (Note that `Arc<T>` can be cloned and given to another
/// thread, so Rust expects `T` to be `Sync`, too. In practice we never share `AllocatorContext`
/// between threads, only between multiple `Allocator` instances within the same `Allocators`
/// instance, but Rust cannot figure this out.)
unsafe impl Sync for AllocationOptionsHolder {}

impl AllocationOptionsHolder {
    pub fn new(alloc_options: AllocationOptions) -> Self {
        Self {
            alloc_options: RefCell::new(alloc_options),
        }
    }

    pub fn set_alloc_options(&self, options: AllocationOptions) {
        let mut alloc_options = self.alloc_options.borrow_mut();
        *alloc_options = options;
    }

    pub fn clear_alloc_options(&self) {
        let mut alloc_options = self.alloc_options.borrow_mut();
        *alloc_options = AllocationOptions::default();
    }

    pub fn get_alloc_options(&self) -> AllocationOptions {
        let alloc_options = self.alloc_options.borrow();
        *alloc_options
    }
}

/// Align the allocation address `region` to the requested `alignment` with the given `offset`,
/// without filling the alignment gap.
pub fn align_allocation_no_fill<VM: VMBinding>(
    region: Address,
    alignment: usize,
    offset: usize,
) -> Address {
    align_allocation_inner::<VM>(region, alignment, offset, VM::MIN_ALIGNMENT, false)
}

/// Align the allocation address `region` to the requested `alignment` with the given `offset`,
/// filling the alignment gap with `VM::ALIGNMENT_VALUE` when the VM defines a non-zero alignment
/// value.
pub fn align_allocation<VM: VMBinding>(
    region: Address,
    alignment: usize,
    offset: usize,
) -> Address {
    align_allocation_inner::<VM>(region, alignment, offset, VM::MIN_ALIGNMENT, true)
}

/// The internal implementation of allocation alignment. `known_alignment` is the alignment that
/// `region` is already guaranteed to have (at least `VM::MIN_ALIGNMENT`), and `fillalignmentgap`
/// controls whether the skipped bytes are filled with the VM's alignment value.
pub fn align_allocation_inner<VM: VMBinding>(
    region: Address,
    alignment: usize,
    offset: usize,
    known_alignment: usize,
    fillalignmentgap: bool,
) -> Address {
    debug_assert!(known_alignment >= VM::MIN_ALIGNMENT);
    // Make sure MIN_ALIGNMENT is reasonable.
    #[allow(clippy::assertions_on_constants)]
    {
        // TODO: This is a static assertion that VM::MIN_ALIGNMENT must be at least 4.
        // This assertion has existed since JikesRVM MMTk.
        // We are keeping it here because some implementation details of the allocator may rely on this assertion.
        // Some GC algorithms may require a stricter minimum alignment, and that can override the value.
        // We should refactor the VM binding API and the internal interface
        // to reconcile the requirements from the VM and the GC algorithms.
        debug_assert!(VM::MIN_ALIGNMENT >= std::mem::size_of::<i32>());
    }
    debug_assert!(!(fillalignmentgap && region.is_zero()));
    debug_assert!(alignment <= VM::MAX_ALIGNMENT);
    debug_assert!(region.is_aligned_to(VM::ALLOC_END_ALIGNMENT));
    debug_assert!((alignment & (VM::MIN_ALIGNMENT - 1)) == 0);
    debug_assert!((offset & (VM::MIN_ALIGNMENT - 1)) == 0);

    // No alignment ever required.
    if alignment <= known_alignment || VM::MAX_ALIGNMENT <= VM::MIN_ALIGNMENT {
        return region;
    }

    // May require an alignment
    let mask = (alignment - 1) as isize; // fromIntSignExtend
    let neg_off: isize = -(offset as isize); // fromIntSignExtend
    let delta = neg_off.wrapping_sub_unsigned(region.as_usize()) & mask; // Use wrapping_sub to avoid overflow

    if fillalignmentgap && (VM::ALIGNMENT_VALUE != 0) {
        fill_alignment_gap::<VM>(region, region + delta);
    }

    region + delta
}
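
// Worked example (illustrative numbers only): with `region = 0x1004`, `alignment = 16` and
// `offset = 8`, the computation above gives `mask = 15`, `neg_off = -8` and
// `delta = (-8 - 0x1004) & 15 = 4`, so the function returns `0x1008`. The returned address plus
// the offset (0x1008 + 8 = 0x1010) is 16-byte aligned, which is the invariant callers rely on:
// `result + offset` is a multiple of `alignment`, and `result >= region`.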

/// Fill the specified region with the alignment value.
pub fn fill_alignment_gap<VM: VMBinding>(start: Address, end: Address) {
    if VM::ALIGNMENT_VALUE != 0 {
        let start_ptr = start.to_mut_ptr::<u8>();
        unsafe {
            std::ptr::write_bytes(start_ptr, VM::ALIGNMENT_VALUE, end - start);
        }
    }
}

/// Return the maximum size that may be needed to allocate `size` bytes at the given `alignment`,
/// assuming the allocation cursor is only known to be aligned to `VM::MIN_ALIGNMENT`.
pub fn get_maximum_aligned_size<VM: VMBinding>(size: usize, alignment: usize) -> usize {
    get_maximum_aligned_size_inner::<VM>(size, alignment, VM::MIN_ALIGNMENT)
}

/// Return the maximum size that may be needed to allocate `size` bytes at the given `alignment`,
/// when the allocation cursor is known to be aligned to `known_alignment`.
pub fn get_maximum_aligned_size_inner<VM: VMBinding>(
    size: usize,
    alignment: usize,
    known_alignment: usize,
) -> usize {
    trace!(
        "size={}, alignment={}, known_alignment={}, MIN_ALIGNMENT={}",
        size,
        alignment,
        known_alignment,
        VM::MIN_ALIGNMENT
    );
    debug_assert!(size == size & !(known_alignment - 1));
    debug_assert!(known_alignment >= VM::MIN_ALIGNMENT);

    if VM::MAX_ALIGNMENT <= VM::MIN_ALIGNMENT || alignment <= known_alignment {
        size
    } else {
        size + alignment - known_alignment
    }
}
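
// Worked example (illustrative numbers only): `get_maximum_aligned_size_inner::<VM>(24, 16, 8)`
// returns `24 + 16 - 8 = 32`. A cursor that is only 8-byte aligned may need up to
// `alignment - known_alignment = 8` bytes of padding before reaching a 16-byte aligned address,
// so reserving 32 bytes always leaves room for a 16-byte aligned run of 24 bytes. (Note the
// debug assertion above: `size` must already be a multiple of `known_alignment`.)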

#[cfg(debug_assertions)]
pub(crate) fn assert_allocation_args<VM: VMBinding>(size: usize, align: usize, offset: usize) {
    use crate::util::constants::*;
    // MMTk has assumptions about minimal object size.
    // We need to make sure that all allocations comply with the min object size.
    // Ideally, we check the allocation size, and if it is smaller, we transparently allocate the min
    // object size (the VM does not need to know this). However, for the VM bindings we support at the moment,
    // their object sizes are all larger than MMTk's min object size, so we simply put an assertion here.
    // If you plan to use MMTk with a VM whose object size is smaller than MMTk's min object size, you should
    // meet the min object size in the fastpath.
    debug_assert!(size >= MIN_OBJECT_SIZE);
    // Assert alignment
    debug_assert!(align >= VM::MIN_ALIGNMENT);
    debug_assert!(align <= VM::MAX_ALIGNMENT);
    // Assert offset
    debug_assert!(VM::USE_ALLOCATION_OFFSET || offset == 0);
}

/// The context an allocator needs to access in order to perform allocation.
pub struct AllocatorContext<VM: VMBinding> {
    alloc_options: AllocationOptionsHolder,
    pub state: Arc<GlobalState>,
    pub options: Arc<Options>,
    pub gc_trigger: Arc<GCTrigger<VM>>,
    #[cfg(feature = "analysis")]
    pub analysis_manager: Arc<AnalysisManager<VM>>,
}

impl<VM: VMBinding> AllocatorContext<VM> {
    pub fn new(mmtk: &MMTK<VM>) -> Self {
        Self {
            alloc_options: AllocationOptionsHolder::new(AllocationOptions::default()),
            state: mmtk.state.clone(),
            options: mmtk.options.clone(),
            gc_trigger: mmtk.gc_trigger.clone(),
            #[cfg(feature = "analysis")]
            analysis_manager: mmtk.analysis_manager.clone(),
        }
    }

    pub fn set_alloc_options(&self, options: AllocationOptions) {
        self.alloc_options.set_alloc_options(options);
    }

    pub fn clear_alloc_options(&self) {
        self.alloc_options.clear_alloc_options();
    }

    pub fn get_alloc_options(&self) -> AllocationOptions {
        self.alloc_options.get_alloc_options()
    }
}

/// A trait which implements allocation routines. Every allocator needs to implement this trait.
pub trait Allocator<VM: VMBinding>: Downcast {
    /// Return the [`VMThread`] associated with this allocator instance.
    fn get_tls(&self) -> VMThread;

    /// Return the [`Space`](crate::policy::space::Space) instance associated with this allocator instance.
    fn get_space(&self) -> &'static dyn Space<VM>;

    /// Return the context for the allocator.
    fn get_context(&self) -> &AllocatorContext<VM>;

    /// Return whether this allocator can do thread local allocation. If an allocator does not do
    /// thread local allocation, each allocation will go to the slowpath and will have a check for
    /// GC polls.
    fn does_thread_local_allocation(&self) -> bool;

    /// Return the granularity at which the allocator acquires memory from the global space and
    /// uses it as its thread local buffer. For example, the
    /// [`BumpAllocator`](crate::util::alloc::BumpAllocator) acquires memory in 32KB blocks.
    /// Depending on the actual size of the current object, it always acquires memory in units of
    /// N*32KB (N>=1). Thus the [`BumpAllocator`](crate::util::alloc::BumpAllocator) returns 32KB
    /// for this method. Only allocators that do thread local allocation need to implement this
    /// method.
    fn get_thread_local_buffer_granularity(&self) -> usize {
        assert!(self.does_thread_local_allocation(), "An allocator that does not do thread local allocation does not have a buffer granularity.");
        unimplemented!()
    }

    /// Check if the requested `size` is an obvious out-of-memory case (the requested allocation
    /// size is larger than the heap size). If it is, call `Collection::out_of_memory`. Return
    /// true if the allocation request is an obvious OOM case, and false otherwise.
    fn handle_obvious_oom_request(&self, tls: VMThread, size: usize) -> bool {
        if self.get_context().gc_trigger.will_oom_on_alloc(size) {
            if self
                .get_context()
                .alloc_options
                .get_alloc_options()
                .allow_oom_call
            {
                VM::VMCollection::out_of_memory(
                    tls,
                    crate::util::alloc::AllocationError::HeapOutOfMemory,
                );
            }
            return true;
        }
        false
    }
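
    // Illustrative sketch (hypothetical allocator, not part of this trait): a concrete
    // `alloc_slow_once` implementation would typically perform this check before trying to
    // acquire more memory from its space, e.g.:
    //
    //     fn alloc_slow_once(&mut self, size: usize, align: usize, offset: usize) -> Address {
    //         if self.handle_obvious_oom_request(self.get_tls(), size) {
    //             // The binding has been informed via Collection::out_of_memory (if allowed).
    //             return Address::ZERO;
    //         }
    //         // ... acquire memory from the space and allocate from it ...
    //     }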

    /// An allocation attempt. The implementation of this function depends on the allocator used.
    /// If an allocator supports thread local allocations, then the allocation will be serviced
    /// from its TLAB, otherwise it will default to using the slowpath, i.e. [`alloc_slow`](Allocator::alloc_slow).
    ///
    /// If the heap is full, we trigger a GC to attempt to free up more memory, and then re-attempt
    /// the allocation.
    ///
    /// Note that in the case where the VM is out of memory, we invoke
    /// [`Collection::out_of_memory`] to inform the binding and then return a null pointer back to
    /// it. We have no assumptions on whether the VM will continue executing or abort immediately.
    /// If the VM continues execution, the function will return a null address.
    ///
    /// An allocator needs to make sure the object reference for the returned address is in the
    /// same chunk as the returned address (so the side metadata and the SFT for the object
    /// reference are valid). See `crate::util::alloc::object_ref_guard`.
    ///
    /// Arguments:
    /// * `size`: the allocation size in bytes.
    /// * `align`: the required alignment in bytes.
    /// * `offset`: the required offset in bytes.
    fn alloc(&mut self, size: usize, align: usize, offset: usize) -> Address;

    /// An allocation attempt. The allocation options may specify different behaviors for this
    /// allocation request.
    ///
    /// Arguments:
    /// * `size`: the allocation size in bytes.
    /// * `align`: the required alignment in bytes.
    /// * `offset`: the required offset in bytes.
    /// * `alloc_options`: the allocation options to change the default allocation behavior for
    ///   this request.
    fn alloc_with_options(
        &mut self,
        size: usize,
        align: usize,
        offset: usize,
        alloc_options: AllocationOptions,
    ) -> Address {
        self.get_context().set_alloc_options(alloc_options);
        let ret = self.alloc(size, align, offset);
        self.get_context().clear_alloc_options();
        ret
    }

    /// Slowpath allocation attempt. This function is explicitly not inlined for performance
    /// considerations.
    ///
    /// Arguments:
    /// * `size`: the allocation size in bytes.
    /// * `align`: the required alignment in bytes.
    /// * `offset`: the required offset in bytes.
    #[inline(never)]
    fn alloc_slow(&mut self, size: usize, align: usize, offset: usize) -> Address {
        self.alloc_slow_inline(size, align, offset)
    }

    /// Slowpath allocation attempt. Mostly the same as [`Allocator::alloc_slow`], except that the
    /// allocation options may specify different behaviors for this allocation request.
    ///
    /// This function is not used internally. It is mostly for the bindings.
    /// [`Allocator::alloc_with_options`] still calls the normal [`Allocator::alloc_slow`].
    ///
    /// Arguments:
    /// * `size`: the allocation size in bytes.
    /// * `align`: the required alignment in bytes.
    /// * `offset`: the required offset in bytes.
    /// * `alloc_options`: the allocation options to change the default allocation behavior for
    ///   this request.
    fn alloc_slow_with_options(
        &mut self,
        size: usize,
        align: usize,
        offset: usize,
        alloc_options: AllocationOptions,
    ) -> Address {
        // This function is not used internally, so we will not set the allocation options
        // redundantly.
        self.get_context().set_alloc_options(alloc_options);
        let ret = self.alloc_slow(size, align, offset);
        self.get_context().clear_alloc_options();
        ret
    }

    /// Slowpath allocation attempt. This function executes the actual slowpath allocation. A
    /// slowpath allocation in MMTk attempts to allocate the object using the per-allocator
    /// definition of [`alloc_slow_once`](Allocator::alloc_slow_once). This function also accounts for increasing the
    /// allocation bytes in order to support stress testing. In case precise stress testing is
    /// being used, the [`alloc_slow_once_precise_stress`](Allocator::alloc_slow_once_precise_stress) function is used instead.
    ///
    /// Note that in the case where the VM is out of memory, we invoke
    /// [`Collection::out_of_memory`] with an [`AllocationError::HeapOutOfMemory`] error to inform
    /// the binding and then return a null pointer back to it. We have no assumptions on whether
    /// the VM will continue executing or abort immediately on an
    /// [`AllocationError::HeapOutOfMemory`] error.
    ///
    /// Arguments:
    /// * `size`: the allocation size in bytes.
    /// * `align`: the required alignment in bytes.
    /// * `offset`: the required offset in bytes.
    fn alloc_slow_inline(&mut self, size: usize, align: usize, offset: usize) -> Address {
        let tls = self.get_tls();
        let is_mutator = VM::VMActivePlan::is_mutator(tls);
        let stress_test = self.get_context().options.is_stress_test_gc_enabled();

        // Information about the previous collection.
        let mut emergency_collection = false;
        let mut previous_result_zero = false;

        loop {
            // Try to allocate using the slow path
            let result = if is_mutator && stress_test && *self.get_context().options.precise_stress
            {
                // If we are doing precise stress GC, we invoke the special
                // alloc_slow_once_precise_stress() call. It should make sure that every
                // allocation goes to the slowpath (here) so we can check the allocation bytes
                // and decide if we need to do a stress GC.

                // If we should do a stress GC now, we tell alloc_slow_once_precise_stress(),
                // so it avoids trying any thread local allocation, and directly calls the
                // global acquire and does a poll.
                let need_poll = is_mutator && self.get_context().gc_trigger.should_do_stress_gc();
                self.alloc_slow_once_precise_stress(size, align, offset, need_poll)
            } else {
                // If we are not doing precise stress GC, just call the normal alloc_slow_once().
                // Normal stress test only checks for stress GC in the slowpath.
                self.alloc_slow_once_traced(size, align, offset)
            };

            if !is_mutator {
                debug_assert!(!result.is_zero());
                return result;
            }

            if !result.is_zero() {
                // Report allocation success to assist OutOfMemory handling.
                if !self
                    .get_context()
                    .state
                    .allocation_success
                    .load(Ordering::Relaxed)
                {
                    self.get_context()
                        .state
                        .allocation_success
                        .store(true, Ordering::SeqCst);
                }

                // Only update the allocation bytes if we haven't failed a previous allocation in this loop
                if stress_test && self.get_context().state.is_initialized() && !previous_result_zero
                {
                    let allocated_size = if *self.get_context().options.precise_stress
                        || !self.does_thread_local_allocation()
                    {
                        // For precise stress test, or for allocators that do not have thread local buffer,
                        // we know exactly how many bytes we allocate.
                        size
                    } else {
                        // For normal stress test, we count the entire thread local buffer size as allocated.
                        crate::util::conversions::raw_align_up(
                            size,
                            self.get_thread_local_buffer_granularity(),
                        )
                    };
                    let _allocation_bytes = self
                        .get_context()
                        .state
                        .increase_allocation_bytes_by(allocated_size);

                    // This is the allocation hook for the analysis trait. If you want to call
                    // an analysis counter specific allocation hook, then here is the place to do so
                    #[cfg(feature = "analysis")]
                    if _allocation_bytes > *self.get_context().options.analysis_factor {
                        trace!(
                            "Analysis: allocation_bytes = {} more than analysis_factor = {}",
                            _allocation_bytes,
                            *self.get_context().options.analysis_factor
                        );

                        self.get_context()
                            .analysis_manager
                            .alloc_hook(size, align, offset);
                    }
                }

                return result;
            }

            // From here on, we handle the case where alloc_slow_once failed.
            assert!(result.is_zero());

            if !self.get_context().get_alloc_options().at_safepoint {
                // If the allocation is not at a safepoint, it will not be able to block for GC.
                // But the code beyond this point tests OOM conditions and, if not OOM, tries to
                // allocate again. Since we didn't block for GC, the allocation will fail again if
                // we try again. So we return null immediately.
                return Address::ZERO;
            }

            // It is possible for a thread to be blocked for another GC (non-emergency)
            // immediately after being blocked for a GC (emergency) (e.g. in a stress test), that
            // is, the thread does not leave this loop between the two GCs. The local var
            // 'emergency_collection' was set to true after the first GC. But when we execute the
            // check below, we have just finished the second GC, which is not an emergency. In
            // such a case, we would give a false OOM.
            // We cannot just rely on the local var. Instead, we get the emergency collection
            // value again, and check both.
            if emergency_collection && self.get_context().state.is_emergency_collection() {
                trace!("Emergency collection");
                // Report allocation success to assist OutOfMemory handling.
                // This seems odd, but we must allow each OOM to run its course (and maybe give us back memory)
                let fail_with_oom = !self
                    .get_context()
                    .state
                    .allocation_success
                    .swap(true, Ordering::SeqCst);
                trace!("fail with oom={}", fail_with_oom);
                if fail_with_oom {
                    // Note that we throw a `HeapOutOfMemory` error here and return a null ptr back to the VM
                    trace!("Throw HeapOutOfMemory!");
                    VM::VMCollection::out_of_memory(tls, AllocationError::HeapOutOfMemory);
                    self.get_context()
                        .state
                        .allocation_success
                        .store(false, Ordering::SeqCst);
                    return result;
                }
            }

            /* This is in case a GC occurs, and our mutator context is stale.
             * In some VMs the scheduler can change the affinity between the
             * current thread and the mutator context. This is possible for
             * VMs that dynamically multiplex Java threads onto multiple mutator
             * contexts. */
            // FIXME: No good way to do this
            //current = unsafe {
            //    VMActivePlan::mutator(tls).get_allocator_from_space(space)
            //};

            // Record whether the last collection was an emergency collection. If so, we make one
            // more attempt to allocate before we signal an OOM.
            emergency_collection = self.get_context().state.is_emergency_collection();
            trace!("Got emergency collection as {}", emergency_collection);
            previous_result_zero = true;
        }
    }

    /// Single slow path allocation attempt. This is called by [`alloc_slow_inline`](Allocator::alloc_slow_inline). The
    /// implementation of this function depends on the allocator used. Generally, if an allocator
    /// supports thread local allocations, it will try to allocate more TLAB space here. If it
    /// doesn't, then (generally) the allocator simply allocates enough space for the current
    /// object.
    ///
    /// Arguments:
    /// * `size`: the allocation size in bytes.
    /// * `align`: the required alignment in bytes.
    /// * `offset`: the required offset in bytes.
    fn alloc_slow_once(&mut self, size: usize, align: usize, offset: usize) -> Address;

    /// A wrapper method for [`alloc_slow_once`](Allocator::alloc_slow_once) to insert USDT tracepoints.
    ///
    /// Arguments:
    /// * `size`: the allocation size in bytes.
    /// * `align`: the required alignment in bytes.
    /// * `offset`: the required offset in bytes.
    fn alloc_slow_once_traced(&mut self, size: usize, align: usize, offset: usize) -> Address {
        probe!(mmtk, alloc_slow_once_start);
        // probe! expands to an empty block on unsupported platforms
        #[allow(clippy::let_and_return)]
        let ret = self.alloc_slow_once(size, align, offset);
        probe!(mmtk, alloc_slow_once_end);
        ret
    }

    /// Single slowpath allocation attempt for stress test. When the stress factor is set (e.g. to
    /// N), we would expect that for every N bytes allocated, we trigger a stress GC. However,
    /// allocators that do thread local allocation may allocate from their thread local buffer,
    /// which does not have a GC poll check, and they may even allocate with the JIT generated
    /// allocation fastpath, which is unaware of stress test GC. In both cases, we are not able to
    /// guarantee a stress GC is triggered every N bytes. To solve this, when the stress factor is
    /// set, we will call this method instead of the normal alloc_slow_once(). We expect the
    /// implementation of this slow allocation to trick the fastpath so every allocation fails in
    /// the fastpath, jumps to the slow path, and eventually calls this method again for the
    /// actual allocation.
    ///
    /// The actual implementation of how to trick the fastpath may vary. For example, our bump
    /// pointer allocator will set the thread local buffer limit to the buffer size instead of the
    /// buffer end address. In this case, every fastpath check (cursor + size < limit) will fail,
    /// and jump to this slowpath. In the slowpath, we still allocate from the thread local
    /// buffer, and recompute the limit (remaining buffer size). An illustrative sketch of this
    /// approach is given in the comment after this method.
    ///
    /// If an allocator does not do thread local allocation (i.e. it returns false for
    /// does_thread_local_allocation()), it does not need to override this method. The default
    /// implementation will simply call alloc_slow_once(), and it will work fine for allocators
    /// that do not have thread local allocation.
    ///
    /// Arguments:
    /// * `size`: the allocation size in bytes.
    /// * `align`: the required alignment in bytes.
    /// * `offset`: the required offset in bytes.
    /// * `need_poll`: if this is true, the implementation must poll for a GC, rather than
    ///   attempting to allocate from the local buffer.
    fn alloc_slow_once_precise_stress(
        &mut self,
        size: usize,
        align: usize,
        offset: usize,
        need_poll: bool,
    ) -> Address {
        // If an allocator does thread local allocation but does not override this method to
        // provide a correct implementation, we will log a warning.
        if self.does_thread_local_allocation() && need_poll {
            warn!("{} does not support stress GC (an allocator that does thread local allocation needs to implement alloc_slow_once_precise_stress()).", std::any::type_name::<Self>());
        }
        self.alloc_slow_once_traced(size, align, offset)
    }
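
    // Illustrative sketch (hypothetical types and fields, not MMTk's actual bump allocator): one
    // way to "trick" a bump-pointer fastpath for precise stress testing, as described above. The
    // limit field temporarily stores the *remaining buffer size* instead of the buffer end
    // address, so the usual `cursor + size <= limit` check always fails and every allocation
    // reaches the slowpath, where the real limit can be recovered.
    //
    //     struct StressBumpState {
    //         cursor: usize, // the current allocation cursor (an address)
    //         limit: usize,  // normally the buffer end address; under stress, the remaining size
    //     }
    //
    //     impl StressBumpState {
    //         // Enter stress mode: store the remaining size so the fastpath check always fails.
    //         fn set_limit_for_stress(&mut self, real_limit: usize) {
    //             self.limit = real_limit - self.cursor;
    //         }
    //
    //         // In the slowpath, recover the real limit and allocate from the local buffer.
    //         fn alloc_from_local_buffer(&mut self, size: usize) -> Option<usize> {
    //             let real_limit = self.cursor + self.limit;
    //             if self.cursor + size <= real_limit {
    //                 let result = self.cursor;
    //                 self.cursor += size;
    //                 self.limit = real_limit - self.cursor; // keep storing the remaining size
    //                 Some(result)
    //             } else {
    //                 None // the buffer is exhausted: acquire a new buffer or poll for a GC
    //             }
    //         }
    //     }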

    /// The [`crate::plan::Mutator`] that includes this allocator is going to be destroyed. Some
    /// allocators may need to save/transfer their thread local data to the space.
    fn on_mutator_destroy(&mut self) {
        // By default, do nothing
    }
}

impl_downcast!(Allocator<VM> where VM: VMBinding);