mmtk/util/alloc/
allocator.rs

1use crate::global_state::GlobalState;
2use crate::util::address::Address;
3#[cfg(feature = "analysis")]
4use crate::util::analysis::AnalysisManager;
5use crate::util::heap::gc_trigger::GCTrigger;
6use crate::util::options::Options;
7use crate::MMTK;
8
9use std::cell::RefCell;
10use std::sync::atomic::{AtomicBool, Ordering};
11use std::sync::Arc;
12
13use crate::policy::space::Space;
14use crate::util::opaque_pointer::*;
15use crate::vm::VMBinding;
16use crate::vm::{ActivePlan, Collection};
17use downcast_rs::Downcast;
18
#[repr(C)] // C-compatible layout so VM bindings can receive this error across FFI.
#[derive(Debug)]
/// A list of errors that MMTk can encounter during allocation.
pub enum AllocationError {
    /// The specified heap size is too small for the given program to continue.
    HeapOutOfMemory,
    /// The OS is unable to mmap or acquire more memory. Critical error. MMTk expects the VM to
    /// abort if such an error is thrown.
    MmapOutOfMemory,
}
29
/// Allow specifying different behaviors with [`Allocator::alloc_with_options`].
#[repr(C)]
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub struct AllocationOptions {
    /// Whether over-committing is allowed at this allocation site.  Over-committing means the
    /// allocation is allowed to go beyond the current heap size.  But it is not guaranteed to
    /// succeed.
    ///
    /// **The default is `false`**.
    ///
    /// Note that regardless of the value of `allow_overcommit`, the allocation may trigger GC if
    /// the GC trigger considers it needed.
    pub allow_overcommit: bool,

    /// Whether the allocation is at a safepoint.
    ///
    /// **The default is `true`**.
    ///
    /// If `true`, the allocation is allowed to block for GC.
    ///
    /// If `false`, the allocation will immediately return a null address if the allocation cannot
    /// be satisfied without a GC.
    pub at_safepoint: bool,

    /// Whether the allocation is allowed to call [`Collection::out_of_memory`].
    ///
    /// **The default is `true`**.
    ///
    /// If `true`, the allocation will call [`Collection::out_of_memory`] when out of memory and
    /// return null.
    ///
    /// If `false`, the allocation will return null immediately when out of memory.
    pub allow_oom_call: bool,
}
64
65/// The default value for `AllocationOptions` has the same semantics as calling [`Allocator::alloc`]
66/// directly.
67impl Default for AllocationOptions {
68    fn default() -> Self {
69        Self {
70            allow_overcommit: false,
71            at_safepoint: true,
72            allow_oom_call: true,
73        }
74    }
75}
76
77impl AllocationOptions {
78    pub(crate) fn is_default(&self) -> bool {
79        *self == AllocationOptions::default()
80    }
81}
82
/// A wrapper for [`AllocatorContext`] to hold an [`AllocationOptions`] that can be modified by the
/// same mutator thread.
///
/// All [`Allocator`] instances in `Allocators` share one `AllocationOptions` instance, and it will
/// only be accessed by the mutator (via `Mutator::allocators`) or the GC worker (via
/// `GCWorker::copy`) that owns it.  Rust doesn't like multiple mutable references pointing to a
/// shared data structure.  We cannot use [`atomic::Atomic`] because `AllocationOptions` has
/// multiple fields. We wrap it in a `RefCell` to make it internally mutable.
///
/// Note: The allocation options are accessed every time [`Allocator::alloc_with_options`] is
/// called.  Because API functions should only be called on allocation slow paths, we believe that
/// `RefCell` should be good enough for performance.  If this is too slow, we may consider
/// `UnsafeCell`.  If that's still too slow, we should consider changing the API to make the
/// allocation options a persistent per-mutator value, and allow the VM binding to set its value
/// via a new API function.
struct AllocationOptionsHolder {
    // Interior mutability: set/cleared through `&self` by the owning mutator or GC worker.
    alloc_options: RefCell<AllocationOptions>,
}
100
/// Strictly speaking, `AllocationOptionsHolder` isn't `Sync`.  Two threads cannot set or clear the
/// same `AllocationOptionsHolder` at the same time.  However, both `Mutator` and `GCWorker` are
/// `Send`, and both of which own `Allocators` and require its field `Arc<AllocationContext>` to be
/// `Send`, which requires `AllocationContext` to be `Sync`, which requires
/// `AllocationOptionsHolder` to be `Sync`.  (Note that `Arc<T>` can be cloned and given to another
/// thread, and Rust expects `T` to be `Sync`, too.  But we never share `AllocationContext` between
/// threads, but only between multiple `Allocator` instances within the same `Allocators` instance.
/// Rust can't figure this out.)
// SAFETY: see the doc comment above — the holder is only ever accessed from the one thread that
// owns the enclosing `Mutator` or `GCWorker`, so no concurrent access can occur in practice.
unsafe impl Sync for AllocationOptionsHolder {}
110
111impl AllocationOptionsHolder {
112    pub fn new(alloc_options: AllocationOptions) -> Self {
113        Self {
114            alloc_options: RefCell::new(alloc_options),
115        }
116    }
117    pub fn set_alloc_options(&self, options: AllocationOptions) {
118        let mut alloc_options = self.alloc_options.borrow_mut();
119        *alloc_options = options;
120    }
121
122    pub fn clear_alloc_options(&self) {
123        let mut alloc_options = self.alloc_options.borrow_mut();
124        *alloc_options = AllocationOptions::default();
125    }
126
127    pub fn get_alloc_options(&self) -> AllocationOptions {
128        let alloc_options = self.alloc_options.borrow();
129        *alloc_options
130    }
131}
132
/// Align `region` up to the requested `alignment` and `offset`, without filling the alignment
/// gap.  Convenience wrapper around [`align_allocation_inner`] with gap filling disabled.
///
/// Arguments:
/// * `region`: the current allocation cursor.
/// * `alignment`: the required alignment in bytes.
/// * `offset`: the required offset in bytes.
pub fn align_allocation_no_fill<VM: VMBinding>(
    region: Address,
    alignment: usize,
    offset: usize,
) -> Address {
    align_allocation_inner::<VM>(region, alignment, offset, VM::MIN_ALIGNMENT, false)
}
140
/// Align `region` up to the requested `alignment` and `offset`, filling the alignment gap with
/// `VM::ALIGNMENT_VALUE` when one is created.  Convenience wrapper around
/// [`align_allocation_inner`] with gap filling enabled.
///
/// Arguments:
/// * `region`: the current allocation cursor.
/// * `alignment`: the required alignment in bytes.
/// * `offset`: the required offset in bytes.
pub fn align_allocation<VM: VMBinding>(
    region: Address,
    alignment: usize,
    offset: usize,
) -> Address {
    align_allocation_inner::<VM>(region, alignment, offset, VM::MIN_ALIGNMENT, true)
}
148
/// Compute the address at or after `region` that satisfies the requested `alignment` and
/// `offset`, optionally filling the skipped gap with `VM::ALIGNMENT_VALUE`.
///
/// Arguments:
/// * `region`: the current allocation cursor.
/// * `alignment`: the required alignment in bytes (must be a multiple of `VM::MIN_ALIGNMENT`).
/// * `offset`: the required offset in bytes (must be a multiple of `VM::MIN_ALIGNMENT`).
/// * `known_alignment`: the alignment that `region` is already known to have; no adjustment is
///   done when it already satisfies `alignment`.
/// * `fillalignmentgap`: if true (and `VM::ALIGNMENT_VALUE` is non-zero), fill the bytes skipped
///   by the adjustment so alignment padding is recognizable.
pub fn align_allocation_inner<VM: VMBinding>(
    region: Address,
    alignment: usize,
    offset: usize,
    known_alignment: usize,
    fillalignmentgap: bool,
) -> Address {
    debug_assert!(known_alignment >= VM::MIN_ALIGNMENT);
    // Make sure MIN_ALIGNMENT is reasonable.
    #[allow(clippy::assertions_on_constants)]
    {
        // TODO: This is a static assertion that VM::MIN_ALIGNMENT must be at least 4.
        // This assertion has existed since JikesRVM MMTk.
        // We are keeping it here because some implementation details of the allocator may rely on this assertion.
        // Some GC algorithms may require a stricter minimum alignment, and that can override the value.
        // We should refactor the VM binding API and the internal interface
        // to reconcile the requirements from the VM and the GC algorithms.
        debug_assert!(VM::MIN_ALIGNMENT >= std::mem::size_of::<i32>());
    }
    // Filling requires a real region to write into; a zero region cannot be filled.
    debug_assert!(!(fillalignmentgap && region.is_zero()));
    debug_assert!(alignment <= VM::MAX_ALIGNMENT);
    debug_assert!(region.is_aligned_to(VM::ALLOC_END_ALIGNMENT));
    // Both alignment and offset must be multiples of the minimum alignment.
    debug_assert!((alignment & (VM::MIN_ALIGNMENT - 1)) == 0);
    debug_assert!((offset & (VM::MIN_ALIGNMENT - 1)) == 0);

    // No alignment ever required.
    if alignment <= known_alignment || VM::MAX_ALIGNMENT <= VM::MIN_ALIGNMENT {
        return region;
    }

    // May require an alignment
    let mask = (alignment - 1) as isize; // fromIntSignExtend
    let neg_off: isize = -(offset as isize); // fromIntSignExtend
    // delta = (-offset - region) mod alignment: the number of bytes to advance so that
    // (region + delta + offset) is alignment-aligned.
    let delta = neg_off.wrapping_sub_unsigned(region.as_usize()) & mask; // Use wrapping_sub to avoid overflow

    if fillalignmentgap && (VM::ALIGNMENT_VALUE != 0) {
        fill_alignment_gap::<VM>(region, region + delta);
    }

    region + delta
}
190
191/// Fill the specified region with the alignment value.
192pub fn fill_alignment_gap<VM: VMBinding>(start: Address, end: Address) {
193    if VM::ALIGNMENT_VALUE != 0 {
194        let start_ptr = start.to_mut_ptr::<u8>();
195        unsafe {
196            std::ptr::write_bytes(start_ptr, VM::ALIGNMENT_VALUE, end - start);
197        }
198    }
199}
200
/// Return the maximum size that may be needed to satisfy an allocation of `size` bytes at the
/// given `alignment`, assuming the cursor is already `VM::MIN_ALIGNMENT`-aligned.  Convenience
/// wrapper around [`get_maximum_aligned_size_inner`].
pub fn get_maximum_aligned_size<VM: VMBinding>(size: usize, alignment: usize) -> usize {
    get_maximum_aligned_size_inner::<VM>(size, alignment, VM::MIN_ALIGNMENT)
}
204
205pub fn get_maximum_aligned_size_inner<VM: VMBinding>(
206    size: usize,
207    alignment: usize,
208    known_alignment: usize,
209) -> usize {
210    trace!(
211        "size={}, alignment={}, known_alignment={}, MIN_ALIGNMENT={}",
212        size,
213        alignment,
214        known_alignment,
215        VM::MIN_ALIGNMENT
216    );
217    debug_assert!(size == size & !(known_alignment - 1));
218    debug_assert!(known_alignment >= VM::MIN_ALIGNMENT);
219
220    if VM::MAX_ALIGNMENT <= VM::MIN_ALIGNMENT || alignment <= known_alignment {
221        size
222    } else {
223        size + alignment - known_alignment
224    }
225}
226
#[cfg(debug_assertions)]
/// Debug-only sanity checks on the arguments of an allocation request: minimum object size,
/// alignment bounds, and (when the VM does not use allocation offsets) a zero offset.
pub(crate) fn assert_allocation_args<VM: VMBinding>(size: usize, align: usize, offset: usize) {
    use crate::util::constants::*;
    // MMTk has assumptions about minimal object size.
    // We need to make sure that all allocations comply with the min object size.
    // Ideally, we check the allocation size, and if it is smaller, we transparently allocate the min
    // object size (the VM does not need to know this). However, for the VM bindings we support at the moment,
    // their object sizes are all larger than MMTk's min object size, so we simply put an assertion here.
    // If you plan to use MMTk with a VM with its object size smaller than MMTk's min object size, you should
    // meet the min object size in the fastpath.
    debug_assert!(size >= MIN_OBJECT_SIZE);
    // Assert alignment
    debug_assert!(align >= VM::MIN_ALIGNMENT);
    debug_assert!(align <= VM::MAX_ALIGNMENT);
    // Assert offset
    debug_assert!(VM::USE_ALLOCATION_OFFSET || offset == 0);
}
244
/// The context an allocator needs to access in order to perform allocation.
///
/// **Note:** An `AllocatorContext` is a thread-local struct, however, it is
/// used as `Arc<AllocatorContext>` inside all allocator implementations since
/// we need the entire struct to be `Send`.
///
/// See doc comment on `impl Sync` for `AllocationOptionsHolder` above.
/// See here for more information: <https://github.com/mmtk/mmtk-core/issues/1474>
pub struct AllocatorContext<VM: VMBinding> {
    // Per-request allocation options, set/cleared around `alloc_with_options` calls.
    alloc_options: AllocationOptionsHolder,
    /// Shared global MMTk state.
    pub state: Arc<GlobalState>,
    /// Have we thrown an OOM already?
    pub thrown_oom: AtomicBool,
    /// The MMTk options the instance was created with.
    pub options: Arc<Options>,
    /// The GC trigger, consulted for OOM checks and stress GC decisions.
    pub gc_trigger: Arc<GCTrigger<VM>>,
    /// Analysis hooks, only present when the `analysis` feature is enabled.
    #[cfg(feature = "analysis")]
    pub analysis_manager: Arc<AnalysisManager<VM>>,
}
263
264impl<VM: VMBinding> AllocatorContext<VM> {
265    pub fn new(mmtk: &MMTK<VM>) -> Self {
266        Self {
267            alloc_options: AllocationOptionsHolder::new(AllocationOptions::default()),
268            state: mmtk.state.clone(),
269            thrown_oom: AtomicBool::new(false),
270            options: mmtk.options.clone(),
271            gc_trigger: mmtk.gc_trigger.clone(),
272            #[cfg(feature = "analysis")]
273            analysis_manager: mmtk.analysis_manager.clone(),
274        }
275    }
276
277    pub fn set_alloc_options(&self, options: AllocationOptions) {
278        self.alloc_options.set_alloc_options(options);
279    }
280
281    pub fn clear_alloc_options(&self) {
282        self.alloc_options.clear_alloc_options();
283    }
284
285    pub fn get_alloc_options(&self) -> AllocationOptions {
286        self.alloc_options.get_alloc_options()
287    }
288}
289
290fn reset_allocation_state<VM: VMBinding, A: Allocator<VM> + ?Sized>(allocator: &A) {
291    let context = allocator.get_context();
292    // Relaxed store is fine since this is a thread-local boolean.
293    context.thrown_oom.store(false, Ordering::Relaxed);
294}
295
296/// A trait which implements allocation routines. Every allocator needs to implements this trait.
297pub trait Allocator<VM: VMBinding>: Downcast {
298    /// Return the [`VMThread`] associated with this allocator instance.
299    fn get_tls(&self) -> VMThread;
300
301    /// Return the [`Space`](src/policy/space/Space) instance associated with this allocator instance.
302    fn get_space(&self) -> &'static dyn Space<VM>;
303
304    /// Return the context for the allocator.
305    fn get_context(&self) -> &AllocatorContext<VM>;
306
307    /// Return if this allocator can do thread local allocation. If an allocator does not do thread
308    /// local allocation, each allocation will go to slowpath and will have a check for GC polls.
309    fn does_thread_local_allocation(&self) -> bool;
310
311    /// Return at which granularity the allocator acquires memory from the global space and use
312    /// them as thread local buffer. For example, the [`BumpAllocator`](crate::util::alloc::BumpAllocator) acquires memory at 32KB
313    /// blocks. Depending on the actual size for the current object, they always acquire memory of
314    /// N*32KB (N>=1). Thus the [`BumpAllocator`](crate::util::alloc::BumpAllocator) returns 32KB for this method.  Only allocators
315    /// that do thread local allocation need to implement this method.
316    fn get_thread_local_buffer_granularity(&self) -> usize {
317        assert!(self.does_thread_local_allocation(), "An allocator that does not thread local allocation does not have a buffer granularity.");
318        unimplemented!()
319    }
320
321    /// Check if the requested `size` is an obvious out-of-memory case (requested allocation size is larger than the heap size).
322    /// If it is, call `Collection::out_of_memory`.  Return true if the allocation request is an obvious OOM case, and false otherwise.
323    fn handle_obvious_oom_request(&self, tls: VMThread, size: usize) -> bool {
324        if self.get_context().gc_trigger.will_oom_on_alloc(size) {
325            if self
326                .get_context()
327                .alloc_options
328                .get_alloc_options()
329                .allow_oom_call
330            {
331                self.out_of_memory(tls);
332            }
333            return true;
334        }
335        false
336    }
337
338    /// Wrapper around [`Collection::out_of_memory`]. Used to set up relevant state and signal out
339    /// of memory errors.
340    fn out_of_memory(&self, tls: VMThread) {
341        VM::VMCollection::out_of_memory(tls, AllocationError::HeapOutOfMemory);
342        // Relaxed store is fine since this is a thread-local boolean.
343        self.get_context().thrown_oom.store(true, Ordering::Relaxed);
344    }
345
346    /// An allocation attempt. The implementation of this function depends on the allocator used.
347    /// If an allocator supports thread local allocations, then the allocation will be serviced
348    /// from its TLAB, otherwise it will default to using the slowpath, i.e. [`alloc_slow`](Allocator::alloc_slow).
349    ///
350    /// If the heap is full, we trigger a GC and attempt to free up
351    /// more memory, and re-attempt the allocation.
352    ///
353    /// Note that in the case where the VM is out of memory, we invoke
354    /// [`Collection::out_of_memory`] to inform the binding and then return a null pointer back to
355    /// it. We have no assumptions on whether the VM will continue executing or abort immediately.
356    /// If the VM continues execution, the function will return a null address.
357    ///
358    /// An allocator needs to make sure the object reference for the returned address is in the same
359    /// chunk as the returned address (so the side metadata and the SFT for an object reference is valid).
360    /// See [`crate::util::alloc::object_ref_guard`](util/alloc/object_ref_guard).
361    ///
362    /// Arguments:
363    /// * `size`: the allocation size in bytes.
364    /// * `align`: the required alignment in bytes.
365    /// * `offset` the required offset in bytes.
366    fn alloc(&mut self, size: usize, align: usize, offset: usize) -> Address;
367
368    /// An allocation attempt. The allocation options may specify different behaviors for this allocation request.
369    ///
370    /// Arguments:
371    /// * `size`: the allocation size in bytes.
372    /// * `align`: the required alignment in bytes.
373    /// * `offset` the required offset in bytes.
374    /// * `options`: the allocation options to change the default allocation behavior for this request.
375    fn alloc_with_options(
376        &mut self,
377        size: usize,
378        align: usize,
379        offset: usize,
380        alloc_options: AllocationOptions,
381    ) -> Address {
382        self.get_context().set_alloc_options(alloc_options);
383        let ret = self.alloc(size, align, offset);
384        self.get_context().clear_alloc_options();
385        ret
386    }
387
388    /// Slowpath allocation attempt. This function is explicitly not inlined for performance
389    /// considerations.
390    ///
391    /// Arguments:
392    /// * `size`: the allocation size in bytes.
393    /// * `align`: the required alignment in bytes.
394    /// * `offset` the required offset in bytes.
395    #[inline(never)]
396    fn alloc_slow(&mut self, size: usize, align: usize, offset: usize) -> Address {
397        self.alloc_slow_inline(size, align, offset)
398    }
399
400    /// Slowpath allocation attempt. Mostly the same as [`Allocator::alloc_slow`], except that the allocation options
401    /// may specify different behaviors for this allocation request.
402    ///
403    /// This function is not used internally. It is mostly for the bindings.
404    /// [`Allocator::alloc_with_options`] still calls the normal [`Allocator::alloc_slow`].
405    ///
406    /// Arguments:
407    /// * `size`: the allocation size in bytes.
408    /// * `align`: the required alignment in bytes.
409    /// * `offset` the required offset in bytes.
410    fn alloc_slow_with_options(
411        &mut self,
412        size: usize,
413        align: usize,
414        offset: usize,
415        alloc_options: AllocationOptions,
416    ) -> Address {
417        // The function is not used internally. We won't set no_gc_on_fail redundantly.
418        self.get_context().set_alloc_options(alloc_options);
419        let ret = self.alloc_slow(size, align, offset);
420        self.get_context().clear_alloc_options();
421        ret
422    }
423
424    /// Slowpath allocation attempt. This function executes the actual slowpath allocation.  A
425    /// slowpath allocation in MMTk attempts to allocate the object using the per-allocator
426    /// definition of [`alloc_slow_once`](Allocator::alloc_slow_once). This function also accounts for increasing the
427    /// allocation bytes in order to support stress testing. In case precise stress testing is
428    /// being used, the [`alloc_slow_once_precise_stress`](Allocator::alloc_slow_once_precise_stress) function is used instead.
429    ///
430    /// Note that in the case where the VM is out of memory, we invoke
431    /// [`Collection::out_of_memory`] with a [`AllocationError::HeapOutOfMemory`] error to inform
432    /// the binding and then return a null pointer back to it. We have no assumptions on whether
433    /// the VM will continue executing or abort immediately on a
434    /// [`AllocationError::HeapOutOfMemory`] error.
435    ///
436    /// Arguments:
437    /// * `size`: the allocation size in bytes.
438    /// * `align`: the required alignment in bytes.
439    /// * `offset` the required offset in bytes.
440    fn alloc_slow_inline(&mut self, size: usize, align: usize, offset: usize) -> Address {
441        let tls = self.get_tls();
442        let is_mutator = VM::VMActivePlan::is_mutator(tls);
443        let stress_test = self.get_context().options.is_stress_test_gc_enabled();
444        assert!(!self.get_context().thrown_oom.load(Ordering::Relaxed), "We should not enter alloc_slow_inline if we have already thrown OOM for this allocation request.");
445
446        // Information about the previous collection.
447        let mut emergency_collection = false;
448        let mut previous_result_zero = false;
449
450        loop {
451            // Try to allocate using the slow path
452            let result = if is_mutator && stress_test && *self.get_context().options.precise_stress
453            {
454                // If we are doing precise stress GC, we invoke the special allow_slow_once call.
455                // alloc_slow_once_precise_stress() should make sure that every allocation goes
456                // to the slowpath (here) so we can check the allocation bytes and decide
457                // if we need to do a stress GC.
458
459                // If we should do a stress GC now, we tell the alloc_slow_once_precise_stress()
460                // so they would avoid try any thread local allocation, and directly call
461                // global acquire and do a poll.
462                let need_poll = is_mutator && self.get_context().gc_trigger.should_do_stress_gc();
463                self.alloc_slow_once_precise_stress(size, align, offset, need_poll)
464            } else {
465                // If we are not doing precise stress GC, just call the normal alloc_slow_once().
466                // Normal stress test only checks for stress GC in the slowpath.
467                self.alloc_slow_once_traced(size, align, offset)
468            };
469
470            if !is_mutator {
471                debug_assert!(!result.is_zero());
472                debug_assert!(!self.get_context().thrown_oom.load(Ordering::Relaxed));
473                return result;
474            }
475
476            if !result.is_zero() {
477                // Report allocation success to assist OutOfMemory handling.
478                if !self
479                    .get_context()
480                    .state
481                    .allocation_success
482                    .load(Ordering::Relaxed)
483                {
484                    self.get_context()
485                        .state
486                        .allocation_success
487                        .store(true, Ordering::SeqCst);
488                }
489                debug_assert!(!self.get_context().thrown_oom.load(Ordering::Relaxed));
490
491                // Only update the allocation bytes if we haven't failed a previous allocation in this loop
492                if stress_test && self.get_context().state.is_initialized() && !previous_result_zero
493                {
494                    let allocated_size = if *self.get_context().options.precise_stress
495                        || !self.does_thread_local_allocation()
496                    {
497                        // For precise stress test, or for allocators that do not have thread local buffer,
498                        // we know exactly how many bytes we allocate.
499                        size
500                    } else {
501                        // For normal stress test, we count the entire thread local buffer size as allocated.
502                        crate::util::conversions::raw_align_up(
503                            size,
504                            self.get_thread_local_buffer_granularity(),
505                        )
506                    };
507                    let _allocation_bytes = self
508                        .get_context()
509                        .state
510                        .increase_allocation_bytes_by(allocated_size);
511
512                    // This is the allocation hook for the analysis trait. If you want to call
513                    // an analysis counter specific allocation hook, then here is the place to do so
514                    #[cfg(feature = "analysis")]
515                    if _allocation_bytes > *self.get_context().options.analysis_factor {
516                        trace!(
517                            "Analysis: allocation_bytes = {} more than analysis_factor = {}",
518                            _allocation_bytes,
519                            *self.get_context().options.analysis_factor
520                        );
521
522                        self.get_context()
523                            .analysis_manager
524                            .alloc_hook(size, align, offset);
525                    }
526                }
527
528                return result;
529            }
530
531            // From here on, we handle the case that alloc_once failed.
532            assert!(result.is_zero());
533
534            if !self.get_context().get_alloc_options().at_safepoint {
535                // If the allocation is not at safepoint, it will not be able to block for GC.  But
536                // the code beyond this point tests OOM conditions and, if not OOM, try to allocate
537                // again.  Since we didn't block for GC, the allocation will fail again if we try
538                // again. So we return null immediately.
539                reset_allocation_state(self);
540                return Address::ZERO;
541            }
542
543            // If we have already thrown an OOM for this allocation then return a zero.
544            // Relaxed load and store is fine given this is a thread-local boolean.
545            if self.get_context().thrown_oom.load(Ordering::Relaxed) {
546                // Need to reset the thrown_oom state since we're giving up on this allocation,
547                // that is to say, the thrown_oom state is *per* allocation request
548                reset_allocation_state(self);
549                return Address::ZERO;
550            }
551
552            // It is possible to have cases where a thread is blocked for another GC (non emergency)
553            // immediately after being blocked for a GC (emergency) (e.g. in stress test), that is saying
554            // the thread does not leave this loop between the two GCs. The local var 'emergency_collection'
555            // was set to true after the first GC. But when we execute this check below, we just finished
556            // the second GC, which is not emergency. In such case, we will give a false OOM.
557            // We cannot just rely on the local var. Instead, we get the emergency collection value again,
558            // and check both.
559            if emergency_collection && self.get_context().state.is_emergency_collection() {
560                trace!("Emergency collection");
561                // Report allocation success to assist OutOfMemory handling.
562                // This seems odd, but we must allow each OOM to run its course (and maybe give us back memory)
563                let fail_with_oom = !self
564                    .get_context()
565                    .state
566                    .allocation_success
567                    .swap(true, Ordering::SeqCst);
568                trace!("fail with oom={}", fail_with_oom);
569                if fail_with_oom {
570                    // Note that we throw a `HeapOutOfMemory` error here and return a null ptr back to the VM
571                    trace!("Throw HeapOutOfMemory!");
572                    self.out_of_memory(tls);
573                    reset_allocation_state(self);
574                    self.get_context()
575                        .state
576                        .allocation_success
577                        .store(false, Ordering::SeqCst);
578                    return result;
579                }
580            }
581
582            /* This is in case a GC occurs, and our mutator context is stale.
583             * In some VMs the scheduler can change the affinity between the
584             * current thread and the mutator context. This is possible for
585             * VMs that dynamically multiplex Java threads onto multiple mutator
586             * contexts. */
587            // FIXME: No good way to do this
588            //current = unsafe {
589            //    VMActivePlan::mutator(tls).get_allocator_from_space(space)
590            //};
591
592            // Record whether last collection was an Emergency collection. If so, we make one more
593            // attempt to allocate before we signal an OOM.
594            emergency_collection = self.get_context().state.is_emergency_collection();
595            trace!("Got emergency collection as {}", emergency_collection);
596            previous_result_zero = true;
597        }
598    }
599
600    /// Single slow path allocation attempt. This is called by [`alloc_slow_inline`](Allocator::alloc_slow_inline). The
601    /// implementation of this function depends on the allocator used. Generally, if an allocator
602    /// supports thread local allocations, it will try to allocate more TLAB space here. If it
603    /// doesn't, then (generally) the allocator simply allocates enough space for the current
604    /// object.
605    ///
606    /// Arguments:
607    /// * `size`: the allocation size in bytes.
608    /// * `align`: the required alignment in bytes.
609    /// * `offset` the required offset in bytes.
610    fn alloc_slow_once(&mut self, size: usize, align: usize, offset: usize) -> Address;
611
612    /// A wrapper method for [`alloc_slow_once`](Allocator::alloc_slow_once) to insert USDT tracepoints.
613    ///
614    /// Arguments:
615    /// * `size`: the allocation size in bytes.
616    /// * `align`: the required alignment in bytes.
617    /// * `offset` the required offset in bytes.
618    fn alloc_slow_once_traced(&mut self, size: usize, align: usize, offset: usize) -> Address {
619        probe!(mmtk, alloc_slow_once_start);
620        // probe! expands to an empty block on unsupported platforms
621        #[allow(clippy::let_and_return)]
622        let ret = self.alloc_slow_once(size, align, offset);
623        probe!(mmtk, alloc_slow_once_end);
624        ret
625    }
626
627    /// Single slowpath allocation attempt for stress test. When the stress factor is set (e.g. to
628    /// N), we would expect for every N bytes allocated, we will trigger a stress GC.  However, for
629    /// allocators that do thread local allocation, they may allocate from their thread local
630    /// buffer which does not have a GC poll check, and they may even allocate with the JIT
631    /// generated allocation fastpath which is unaware of stress test GC. For both cases, we are
632    /// not able to guarantee a stress GC is triggered every N bytes. To solve this, when the
633    /// stress factor is set, we will call this method instead of the normal alloc_slow_once(). We
634    /// expect the implementation of this slow allocation will trick the fastpath so every
635    /// allocation will fail in the fastpath, jump to the slow path and eventually call this method
636    /// again for the actual allocation.
637    ///
638    /// The actual implementation about how to trick the fastpath may vary. For example, our bump
639    /// pointer allocator will set the thread local buffer limit to the buffer size instead of the
640    /// buffer end address. In this case, every fastpath check (cursor + size < limit) will fail,
641    /// and jump to this slowpath. In the slowpath, we still allocate from the thread local buffer,
642    /// and recompute the limit (remaining buffer size).
643    ///
644    /// If an allocator does not do thread local allocation (which returns false for
645    /// does_thread_local_allocation()), it does not need to override this method. The default
646    /// implementation will simply call allow_slow_once() and it will work fine for allocators that
647    /// do not have thread local allocation.
648    ///
649    /// Arguments:
650    /// * `size`: the allocation size in bytes.
651    /// * `align`: the required alignment in bytes.
652    /// * `offset` the required offset in bytes.
653    /// * `need_poll`: if this is true, the implementation must poll for a GC, rather than
654    ///   attempting to allocate from the local buffer.
655    fn alloc_slow_once_precise_stress(
656        &mut self,
657        size: usize,
658        align: usize,
659        offset: usize,
660        need_poll: bool,
661    ) -> Address {
662        // If an allocator does thread local allocation but does not override this method to
663        // provide a correct implementation, we will log a warning.
664        if self.does_thread_local_allocation() && need_poll {
665            warn!("{} does not support stress GC (An allocator that does thread local allocation needs to implement allow_slow_once_stress_test()).", std::any::type_name::<Self>());
666        }
667        self.alloc_slow_once_traced(size, align, offset)
668    }
669
670    /// The [`crate::plan::Mutator`] that includes this allocator is going to be destroyed. Some allocators
671    /// may need to save/transfer its thread local data to the space.
672    fn on_mutator_destroy(&mut self) {
673        // By default, do nothing
674    }
675}
676
// Allow downcasting a `dyn Allocator<VM>` trait object to a concrete allocator type.
impl_downcast!(Allocator<VM> where VM: VMBinding);