mmtk/plan/
mutator_context.rs

1//! Mutator context for each application thread.
2
3use crate::plan::barriers::Barrier;
4use crate::plan::global::Plan;
5use crate::plan::AllocationSemantics;
6use crate::policy::space::Space;
7use crate::util::alloc::allocator::AllocationOptions;
8use crate::util::alloc::allocators::{AllocatorSelector, Allocators};
9use crate::util::alloc::Allocator;
10use crate::util::{Address, ObjectReference};
11use crate::util::{VMMutatorThread, VMWorkerThread};
12use crate::vm::VMBinding;
13use crate::MMTK;
14
15use enum_map::EnumMap;
16
17use super::barriers::NoBarrier;
18
/// Mapping from allocator selectors to the spaces those allocators allocate into.
/// Each pair associates one allocator (identified by its selector) with a space.
pub(crate) type SpaceMapping<VM> = Vec<(AllocatorSelector, &'static dyn Space<VM>)>;
20
/// A place-holder implementation for `MutatorConfig::prepare_func` that should not be called.
/// It is most often used by plans that set `PlanConstraints::needs_prepare_mutator` to
/// `false`.  It is also used by `NoGC` because it must not trigger GC.
pub(crate) fn unreachable_prepare_func<VM: VMBinding>(
    _mutator: &mut Mutator<VM>,
    _tls: VMWorkerThread,
) {
    unreachable!("`MutatorConfig::prepare_func` must not be called for the current plan.")
}
30
/// A mutator prepare implementation for plans that use [`crate::plan::global::CommonPlan`].
#[allow(unused_variables)]
pub(crate) fn common_prepare_func<VM: VMBinding>(mutator: &mut Mutator<VM>, _tls: VMWorkerThread) {
    // Prepare the free list allocator used for the non-moving space.
    // SAFETY: with `marksweep_as_nonmoving`, `AllocationSemantics::NonMoving` is mapped
    // to a free list allocator (see `create_allocator_mapping`), so the concrete type
    // requested here matches the allocator behind the semantic.
    #[cfg(feature = "marksweep_as_nonmoving")]
    unsafe {
        mutator.allocator_impl_mut_for_semantic::<crate::util::alloc::FreeListAllocator<VM>>(
            AllocationSemantics::NonMoving,
        )
    }
    .prepare();
}
43
/// A place-holder implementation for `MutatorConfig::release_func` that should not be called.
/// Currently only used by `NoGC`.
/// See [`unreachable_prepare_func`] for the prepare counterpart.
pub(crate) fn unreachable_release_func<VM: VMBinding>(
    _mutator: &mut Mutator<VM>,
    _tls: VMWorkerThread,
) {
    unreachable!("`MutatorConfig::release_func` must not be called for the current plan.")
}
52
/// A mutator release implementation for plans that use [`crate::plan::global::CommonPlan`].
/// Releases or resets the allocator backing `AllocationSemantics::NonMoving`; the concrete
/// allocator type depends on which non-moving feature is enabled.
#[allow(unused_variables)]
pub(crate) fn common_release_func<VM: VMBinding>(mutator: &mut Mutator<VM>, _tls: VMWorkerThread) {
    cfg_if::cfg_if! {
        if #[cfg(feature = "marksweep_as_nonmoving")] {
            // Release the free list allocator used for non moving
            unsafe { mutator.allocator_impl_mut_for_semantic::<crate::util::alloc::FreeListAllocator<VM>>(
                AllocationSemantics::NonMoving,
            )}.release();
        } else if #[cfg(feature = "immortal_as_nonmoving")] {
            // Do nothing for the bump pointer allocator
        } else {
            // Reset the Immix allocator
            unsafe { mutator.allocator_impl_mut_for_semantic::<crate::util::alloc::ImmixAllocator<VM>>(
                AllocationSemantics::NonMoving,
            )}.reset();
        }
    }
}
72
/// A place-holder implementation for `MutatorConfig::release_func` that does nothing.
/// Suitable for plans whose mutators need no per-GC release work.
#[allow(dead_code)]
pub(crate) fn no_op_release_func<VM: VMBinding>(_mutator: &mut Mutator<VM>, _tls: VMWorkerThread) {}
76
// This struct is part of the Mutator struct.
// We are trying to make it fixed-sized so that VM bindings can easily define a Mutator type to have the exact same layout as our Mutator struct.
#[repr(C)]
pub struct MutatorConfig<VM: VMBinding> {
    /// Mapping between allocation semantics and allocator selector
    pub allocator_mapping: &'static EnumMap<AllocationSemantics, AllocatorSelector>,
    /// Mapping between allocator selector and spaces. Each pair represents a mapping.
    /// Put this behind a box, so it is a pointer-sized field.
    #[allow(clippy::box_collection)]
    pub space_mapping: Box<SpaceMapping<VM>>,
    /// Plan-specific code for mutator prepare. The VMWorkerThread is the worker thread that executes this prepare function.
    pub prepare_func: &'static (dyn Fn(&mut Mutator<VM>, VMWorkerThread) + Send + Sync),
    /// Plan-specific code for mutator release. The VMWorkerThread is the worker thread that executes this release function.
    pub release_func: &'static (dyn Fn(&mut Mutator<VM>, VMWorkerThread) + Send + Sync),
}
92
93impl<VM: VMBinding> std::fmt::Debug for MutatorConfig<VM> {
94    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
95        f.write_str("MutatorConfig:\n")?;
96        f.write_str("Semantics mapping:\n")?;
97        for (semantic, selector) in self.allocator_mapping.iter() {
98            let space_name: &str = match self
99                .space_mapping
100                .iter()
101                .find(|(selector_to_find, _)| selector_to_find == selector)
102            {
103                Some((_, space)) => space.name(),
104                None => "!!!missing space here!!!",
105            };
106            f.write_fmt(format_args!(
107                "- {:?} = {:?} ({:?})\n",
108                semantic, selector, space_name
109            ))?;
110        }
111        f.write_str("Space mapping:\n")?;
112        for (selector, space) in self.space_mapping.iter() {
113            f.write_fmt(format_args!("- {:?} = {:?}\n", selector, space.name()))?;
114        }
115        Ok(())
116    }
117}
118
/// Used to build a mutator struct
pub struct MutatorBuilder<VM: VMBinding> {
    /// The barrier to install. Defaults to `NoBarrier` until overridden via
    /// [`MutatorBuilder::barrier`].
    barrier: Box<dyn Barrier<VM>>,
    /// The mutator thread that is bound with this Mutator struct.
    mutator_tls: VMMutatorThread,
    /// The MMTk instance the mutator belongs to.
    mmtk: &'static MMTK<VM>,
    /// Plan-specific mutator configuration (allocator/space mappings, prepare/release hooks).
    config: MutatorConfig<VM>,
}
127
128impl<VM: VMBinding> MutatorBuilder<VM> {
129    pub fn new(
130        mutator_tls: VMMutatorThread,
131        mmtk: &'static MMTK<VM>,
132        config: MutatorConfig<VM>,
133    ) -> Self {
134        MutatorBuilder {
135            barrier: Box::new(NoBarrier),
136            mutator_tls,
137            mmtk,
138            config,
139        }
140    }
141
142    pub fn barrier(mut self, barrier: Box<dyn Barrier<VM>>) -> Self {
143        self.barrier = barrier;
144        self
145    }
146
147    pub fn build(self) -> Mutator<VM> {
148        Mutator {
149            allocators: Allocators::<VM>::new(
150                self.mutator_tls,
151                self.mmtk,
152                &self.config.space_mapping,
153            ),
154            barrier: self.barrier,
155            mutator_tls: self.mutator_tls,
156            plan: self.mmtk.get_plan(),
157            config: self.config,
158        }
159    }
160}
161
/// A mutator is a per-thread data structure that manages allocations and barriers. It is usually highly coupled with the language VM.
/// It is recommended for MMTk users 1) to have a mutator struct of the same layout in the thread local storage that can be accessed efficiently,
/// and 2) to implement fastpath allocation and barriers for the mutator in the VM side.
// We are trying to make this struct fixed-sized so that VM bindings can easily define a type to have the exact same layout as this struct.
// Currently Mutator is fixed sized, and we should try keep this invariant:
// - Allocators are fixed-length arrays of allocators.
// - MutatorConfig only has pointers/refs (including fat pointers), and is fixed sized.
#[repr(C)]
pub struct Mutator<VM: VMBinding> {
    /// Per-thread allocators (fixed-length arrays of each allocator kind).
    pub(crate) allocators: Allocators<VM>,
    /// Holds some thread-local states for the barrier.
    pub barrier: Box<dyn Barrier<VM>>,
    /// The mutator thread that is bound with this Mutator struct.
    pub mutator_tls: VMMutatorThread,
    /// The plan this mutator allocates for.
    pub(crate) plan: &'static dyn Plan<VM = VM>,
    /// Plan-specific configuration (allocator/space mappings, prepare/release hooks).
    pub(crate) config: MutatorConfig<VM>,
}
179
180impl<VM: VMBinding> MutatorContext<VM> for Mutator<VM> {
181    fn prepare(&mut self, tls: VMWorkerThread) {
182        (*self.config.prepare_func)(self, tls)
183    }
184    fn release(&mut self, tls: VMWorkerThread) {
185        (*self.config.release_func)(self, tls)
186    }
187
188    // Note that this method is slow, and we expect VM bindings that care about performance to implement allocation fastpath sequence in their bindings.
189    fn alloc(
190        &mut self,
191        size: usize,
192        align: usize,
193        offset: usize,
194        allocator: AllocationSemantics,
195    ) -> Address {
196        let allocator = unsafe {
197            self.allocators
198                .get_allocator_mut(self.config.allocator_mapping[allocator])
199        };
200        // The value should be default/unset at the beginning of an allocation request.
201        debug_assert!(allocator.get_context().get_alloc_options().is_default());
202        allocator.alloc(size, align, offset)
203    }
204
205    fn alloc_with_options(
206        &mut self,
207        size: usize,
208        align: usize,
209        offset: usize,
210        allocator: AllocationSemantics,
211        options: AllocationOptions,
212    ) -> Address {
213        let allocator = unsafe {
214            self.allocators
215                .get_allocator_mut(self.config.allocator_mapping[allocator])
216        };
217        // The value should be default/unset at the beginning of an allocation request.
218        debug_assert!(allocator.get_context().get_alloc_options().is_default());
219        allocator.alloc_with_options(size, align, offset, options)
220    }
221
222    fn alloc_slow(
223        &mut self,
224        size: usize,
225        align: usize,
226        offset: usize,
227        allocator: AllocationSemantics,
228    ) -> Address {
229        let allocator = unsafe {
230            self.allocators
231                .get_allocator_mut(self.config.allocator_mapping[allocator])
232        };
233        // The value should be default/unset at the beginning of an allocation request.
234        debug_assert!(allocator.get_context().get_alloc_options().is_default());
235        allocator.alloc_slow(size, align, offset)
236    }
237
238    fn alloc_slow_with_options(
239        &mut self,
240        size: usize,
241        align: usize,
242        offset: usize,
243        allocator: AllocationSemantics,
244        options: AllocationOptions,
245    ) -> Address {
246        let allocator = unsafe {
247            self.allocators
248                .get_allocator_mut(self.config.allocator_mapping[allocator])
249        };
250        // The value should be default/unset at the beginning of an allocation request.
251        debug_assert!(allocator.get_context().get_alloc_options().is_default());
252        allocator.alloc_slow_with_options(size, align, offset, options)
253    }
254
255    // Note that this method is slow, and we expect VM bindings that care about performance to implement allocation fastpath sequence in their bindings.
256    fn post_alloc(&mut self, refer: ObjectReference, bytes: usize, allocator: AllocationSemantics) {
257        unsafe {
258            self.allocators
259                .get_allocator_mut(self.config.allocator_mapping[allocator])
260        }
261        .get_space()
262        .initialize_object_metadata(refer, bytes)
263    }
264
265    fn get_tls(&self) -> VMMutatorThread {
266        self.mutator_tls
267    }
268
269    fn barrier(&mut self) -> &mut dyn Barrier<VM> {
270        &mut *self.barrier
271    }
272}
273
impl<VM: VMBinding> Mutator<VM> {
    /// Get all the valid allocator selector (no duplicate)
    fn get_all_allocator_selectors(&self) -> Vec<AllocatorSelector> {
        use itertools::Itertools;
        // Sort first so `dedup` removes all duplicates, then drop the `None`
        // placeholder selectors which do not refer to any allocator.
        self.config
            .allocator_mapping
            .iter()
            .map(|(_, selector)| *selector)
            .sorted()
            .dedup()
            .filter(|selector| *selector != AllocatorSelector::None)
            .collect()
    }

    /// Inform each allocator about destroying. Call allocator-specific on destroy methods.
    pub fn on_destroy(&mut self) {
        for selector in self.get_all_allocator_selectors() {
            // SAFETY: selectors come from this mutator's own mapping, so each one
            // refers to an allocator that exists for this mutator.
            unsafe { self.allocators.get_allocator_mut(selector) }.on_mutator_destroy();
        }
    }

    /// Get the allocator for the selector.
    ///
    /// # Safety
    /// The selector needs to be valid, and points to an allocator that has been initialized.
    /// [`crate::memory_manager::get_allocator_mapping`] can be used to get a selector.
    pub unsafe fn allocator(&self, selector: AllocatorSelector) -> &dyn Allocator<VM> {
        self.allocators.get_allocator(selector)
    }

    /// Get the mutable allocator for the selector.
    ///
    /// # Safety
    /// The selector needs to be valid, and points to an allocator that has been initialized.
    /// [`crate::memory_manager::get_allocator_mapping`] can be used to get a selector.
    pub unsafe fn allocator_mut(&mut self, selector: AllocatorSelector) -> &mut dyn Allocator<VM> {
        self.allocators.get_allocator_mut(selector)
    }

    /// Get the allocator of a concrete type for the selector.
    ///
    /// # Safety
    /// The selector needs to be valid, and points to an allocator that has been initialized.
    /// [`crate::memory_manager::get_allocator_mapping`] can be used to get a selector.
    pub unsafe fn allocator_impl<T: Allocator<VM>>(&self, selector: AllocatorSelector) -> &T {
        self.allocators.get_typed_allocator(selector)
    }

    /// Get the mutable allocator of a concrete type for the selector.
    ///
    /// # Safety
    /// The selector needs to be valid, and points to an allocator that has been initialized.
    /// [`crate::memory_manager::get_allocator_mapping`] can be used to get a selector.
    pub unsafe fn allocator_impl_mut<T: Allocator<VM>>(
        &mut self,
        selector: AllocatorSelector,
    ) -> &mut T {
        self.allocators.get_typed_allocator_mut(selector)
    }

    /// Get the allocator of a concrete type for the semantic.
    ///
    /// # Safety
    /// The semantic needs to match the allocator type.
    pub unsafe fn allocator_impl_for_semantic<T: Allocator<VM>>(
        &self,
        semantic: AllocationSemantics,
    ) -> &T {
        self.allocator_impl::<T>(self.config.allocator_mapping[semantic])
    }

    /// Get the mutable allocator of a concrete type for the semantic.
    ///
    /// # Safety
    /// The semantic needs to match the allocator type.
    pub unsafe fn allocator_impl_mut_for_semantic<T: Allocator<VM>>(
        &mut self,
        semantic: AllocationSemantics,
    ) -> &mut T {
        self.allocator_impl_mut::<T>(self.config.allocator_mapping[semantic])
    }

    /// Return the base offset from a mutator pointer to the allocator specified by the selector.
    // `Mutator` is `#[repr(C)]`, so `offset_of!` on it is well-defined. The offset lets
    // VM bindings locate an allocator directly from a mutator pointer (e.g. for fastpath
    // allocation). NOTE(review): this also assumes the allocator arrays inside
    // `Allocators<VM>` have a stable layout — confirm against its definition.
    pub fn get_allocator_base_offset(selector: AllocatorSelector) -> usize {
        use crate::util::alloc::*;
        use memoffset::offset_of;
        use std::mem::size_of;
        // Offset of the `allocators` field, plus the offset of the selected array,
        // plus `index` allocator-sized strides into that array.
        offset_of!(Mutator<VM>, allocators)
            + match selector {
                AllocatorSelector::BumpPointer(index) => {
                    offset_of!(Allocators<VM>, bump_pointer)
                        + size_of::<BumpAllocator<VM>>() * index as usize
                }
                AllocatorSelector::FreeList(index) => {
                    offset_of!(Allocators<VM>, free_list)
                        + size_of::<FreeListAllocator<VM>>() * index as usize
                }
                AllocatorSelector::Immix(index) => {
                    offset_of!(Allocators<VM>, immix)
                        + size_of::<ImmixAllocator<VM>>() * index as usize
                }
                AllocatorSelector::LargeObject(index) => {
                    offset_of!(Allocators<VM>, large_object)
                        + size_of::<LargeObjectAllocator<VM>>() * index as usize
                }
                AllocatorSelector::Malloc(index) => {
                    offset_of!(Allocators<VM>, malloc)
                        + size_of::<MallocAllocator<VM>>() * index as usize
                }
                AllocatorSelector::MarkCompact(index) => {
                    offset_of!(Allocators<VM>, markcompact)
                        + size_of::<MarkCompactAllocator<VM>>() * index as usize
                }
                AllocatorSelector::None => panic!("Expect a valid AllocatorSelector, found None"),
            }
    }
}
391
/// Each GC plan should provide their implementation of a MutatorContext. *Note that this trait is no longer needed as we removed
/// per-plan mutator implementation and we will remove this trait as well in the future.*
// TODO: We should be able to remove this trait, as we removed per-plan mutator implementation, and there is no other type that implements this trait.
// The Mutator struct above is the only type that implements this trait. We should be able to merge them.
pub trait MutatorContext<VM: VMBinding>: Send + 'static {
    /// Do the prepare work for this mutator.
    fn prepare(&mut self, tls: VMWorkerThread);
    /// Do the release work for this mutator.
    fn release(&mut self, tls: VMWorkerThread);
    /// Allocate memory for an object. This function will trigger a GC on failed allocation.
    ///
    /// Arguments:
    /// * `size`: the number of bytes required for the object.
    /// * `align`: required alignment for the object.
    /// * `offset`: offset associated with the alignment. The result plus the offset will be aligned to the given alignment.
    /// * `allocator`: the allocation semantic used for this object.
    fn alloc(
        &mut self,
        size: usize,
        align: usize,
        offset: usize,
        allocator: AllocationSemantics,
    ) -> Address;
    /// Allocate memory for an object with more options to control this allocation request, e.g. not triggering a GC on fail.
    ///
    /// Arguments:
    /// * `size`: the number of bytes required for the object.
    /// * `align`: required alignment for the object.
    /// * `offset`: offset associated with the alignment. The result plus the offset will be aligned to the given alignment.
    /// * `allocator`: the allocation semantic used for this object.
    /// * `options`: the allocation options to change the default allocation behavior for this request.
    fn alloc_with_options(
        &mut self,
        size: usize,
        align: usize,
        offset: usize,
        allocator: AllocationSemantics,
        options: AllocationOptions,
    ) -> Address;
    /// The slow path allocation for [`MutatorContext::alloc`]. This function will trigger a GC on failed allocation.
    ///
    /// This is only useful when the binding
    /// implements the fast path allocation, and would like to explicitly
    /// call the slow path after the fast path allocation fails.
    fn alloc_slow(
        &mut self,
        size: usize,
        align: usize,
        offset: usize,
        allocator: AllocationSemantics,
    ) -> Address;
    /// The slow path allocation for [`MutatorContext::alloc_with_options`].
    ///
    /// This is only useful when the binding
    /// implements the fast path allocation, and would like to explicitly
    /// call the slow path after the fast path allocation fails.
    fn alloc_slow_with_options(
        &mut self,
        size: usize,
        align: usize,
        offset: usize,
        allocator: AllocationSemantics,
        options: AllocationOptions,
    ) -> Address;
    /// Perform post-allocation actions.  For many allocators none are
    /// required.
    ///
    /// Arguments:
    /// * `refer`: the newly allocated object.
    /// * `bytes`: the size of the space allocated (in bytes).
    /// * `allocator`: the allocation semantic used.
    fn post_alloc(&mut self, refer: ObjectReference, bytes: usize, allocator: AllocationSemantics);
    /// Flush per-mutator remembered sets and create GC work for the remembered sets.
    fn flush_remembered_sets(&mut self) {
        self.barrier().flush();
    }
    /// Flush the mutator context.
    fn flush(&mut self) {
        self.flush_remembered_sets();
    }
    /// Get the mutator thread for this mutator context. This is the same value as the argument supplied in
    /// [`crate::memory_manager::bind_mutator`] when this mutator is created.
    fn get_tls(&self) -> VMMutatorThread;
    /// Get active barrier trait object
    fn barrier(&mut self) -> &mut dyn Barrier<VM>;
}
478
/// This is used for plans to indicate the number of allocators reserved for the plan.
/// This is used as a parameter for creating allocator/space mapping.
/// A plan is required to reserve the first few allocators. For example, if n_bump_pointer is 1,
/// it means the first bump pointer allocator will be reserved for the plan (and the plan should
/// initialize its mapping itself), and the spaces in common/base plan will use the following bump
/// pointer allocators.
#[allow(dead_code)]
#[derive(Default)]
pub(crate) struct ReservedAllocators {
    // Each counter is the number of allocators of that kind reserved by the plan.
    pub n_bump_pointer: u8,
    pub n_large_object: u8,
    pub n_malloc: u8,
    pub n_immix: u8,
    pub n_mark_compact: u8,
    pub n_free_list: u8,
}
495
496impl ReservedAllocators {
497    pub const DEFAULT: Self = ReservedAllocators {
498        n_bump_pointer: 0,
499        n_large_object: 0,
500        n_malloc: 0,
501        n_immix: 0,
502        n_mark_compact: 0,
503        n_free_list: 0,
504    };
505    /// check if the number of each allocator is okay. Panics if any allocator exceeds the max number.
506    fn validate(&self) {
507        use crate::util::alloc::allocators::*;
508        assert!(
509            self.n_bump_pointer as usize <= MAX_BUMP_ALLOCATORS,
510            "Allocator mapping declared more bump pointer allocators than the max allowed."
511        );
512        assert!(
513            self.n_large_object as usize <= MAX_LARGE_OBJECT_ALLOCATORS,
514            "Allocator mapping declared more large object allocators than the max allowed."
515        );
516        assert!(
517            self.n_malloc as usize <= MAX_MALLOC_ALLOCATORS,
518            "Allocator mapping declared more malloc allocators than the max allowed."
519        );
520        assert!(
521            self.n_immix as usize <= MAX_IMMIX_ALLOCATORS,
522            "Allocator mapping declared more immix allocators than the max allowed."
523        );
524        assert!(
525            self.n_mark_compact as usize <= MAX_MARK_COMPACT_ALLOCATORS,
526            "Allocator mapping declared more mark compact allocators than the max allowed."
527        );
528        assert!(
529            self.n_free_list as usize <= MAX_FREE_LIST_ALLOCATORS,
530            "Allocator mapping declared more free list allocators than the max allowed."
531        );
532    }
533
534    // We may add more allocators from common/base plan after reserved allocators.
535
536    fn add_bump_pointer_allocator(&mut self) -> AllocatorSelector {
537        let selector = AllocatorSelector::BumpPointer(self.n_bump_pointer);
538        self.n_bump_pointer += 1;
539        selector
540    }
541    fn add_large_object_allocator(&mut self) -> AllocatorSelector {
542        let selector = AllocatorSelector::LargeObject(self.n_large_object);
543        self.n_large_object += 1;
544        selector
545    }
546    #[allow(dead_code)]
547    fn add_malloc_allocator(&mut self) -> AllocatorSelector {
548        let selector = AllocatorSelector::Malloc(self.n_malloc);
549        self.n_malloc += 1;
550        selector
551    }
552    #[allow(dead_code)]
553    fn add_immix_allocator(&mut self) -> AllocatorSelector {
554        let selector = AllocatorSelector::Immix(self.n_immix);
555        self.n_immix += 1;
556        selector
557    }
558    #[allow(dead_code)]
559    fn add_mark_compact_allocator(&mut self) -> AllocatorSelector {
560        let selector = AllocatorSelector::MarkCompact(self.n_mark_compact);
561        self.n_mark_compact += 1;
562        selector
563    }
564    #[allow(dead_code)]
565    fn add_free_list_allocator(&mut self) -> AllocatorSelector {
566        let selector = AllocatorSelector::FreeList(self.n_free_list);
567        self.n_free_list += 1;
568        selector
569    }
570}
571
572/// Create an allocator mapping for spaces in Common/BasePlan for a plan. A plan should reserve its own allocators.
573///
574/// # Arguments
575/// * `reserved`: the number of reserved allocators for the plan specific policies.
576/// * `include_common_plan`: whether the plan uses common plan. If a plan uses CommonPlan, we will initialize allocator mapping for spaces in CommonPlan.
577pub(crate) fn create_allocator_mapping(
578    mut reserved: ReservedAllocators,
579    include_common_plan: bool,
580) -> EnumMap<AllocationSemantics, AllocatorSelector> {
581    // If we need to add new allocators, or new spaces, we need to make sure the allocator we assign here matches the allocator
582    // we used in create_space_mapping(). The easiest way is to add the space/allocator mapping in the same order. So for any modification to this
583    // function, please check the other function.
584
585    let mut map = EnumMap::<AllocationSemantics, AllocatorSelector>::default();
586
587    // spaces in base plan
588
589    #[cfg(feature = "code_space")]
590    {
591        map[AllocationSemantics::Code] = reserved.add_bump_pointer_allocator();
592        map[AllocationSemantics::LargeCode] = reserved.add_bump_pointer_allocator();
593    }
594
595    #[cfg(feature = "ro_space")]
596    {
597        map[AllocationSemantics::ReadOnly] = reserved.add_bump_pointer_allocator();
598    }
599
600    // spaces in common plan
601
602    if include_common_plan {
603        map[AllocationSemantics::Immortal] = reserved.add_bump_pointer_allocator();
604        map[AllocationSemantics::Los] = reserved.add_large_object_allocator();
605        map[AllocationSemantics::NonMoving] = if cfg!(feature = "marksweep_as_nonmoving") {
606            reserved.add_free_list_allocator()
607        } else if cfg!(feature = "immortal_as_nonmoving") {
608            reserved.add_bump_pointer_allocator()
609        } else {
610            reserved.add_immix_allocator()
611        };
612    }
613
614    reserved.validate();
615    map
616}
617
618/// Create a space mapping for spaces in Common/BasePlan for a plan. A plan should reserve its own allocators.
619///
620/// # Arguments
621/// * `reserved`: the number of reserved allocators for the plan specific policies.
622/// * `include_common_plan`: whether the plan uses common plan. If a plan uses CommonPlan, we will initialize allocator mapping for spaces in CommonPlan.
623/// * `plan`: the reference to the plan.
624pub(crate) fn create_space_mapping<VM: VMBinding>(
625    mut reserved: ReservedAllocators,
626    include_common_plan: bool,
627    plan: &'static dyn Plan<VM = VM>,
628) -> Vec<(AllocatorSelector, &'static dyn Space<VM>)> {
629    // If we need to add new allocators, or new spaces, we need to make sure the allocator we assign here matches the allocator
630    // we used in create_space_mapping(). The easiest way is to add the space/allocator mapping in the same order. So for any modification to this
631    // function, please check the other function.
632
633    let mut vec: Vec<(AllocatorSelector, &'static dyn Space<VM>)> = vec![];
634
635    // spaces in BasePlan
636
637    #[cfg(feature = "code_space")]
638    {
639        vec.push((
640            reserved.add_bump_pointer_allocator(),
641            &plan.base().code_space,
642        ));
643        vec.push((
644            reserved.add_bump_pointer_allocator(),
645            &plan.base().code_lo_space,
646        ));
647    }
648
649    #[cfg(feature = "ro_space")]
650    vec.push((reserved.add_bump_pointer_allocator(), &plan.base().ro_space));
651
652    // spaces in CommonPlan
653
654    if include_common_plan {
655        vec.push((
656            reserved.add_bump_pointer_allocator(),
657            plan.common().get_immortal(),
658        ));
659        vec.push((
660            reserved.add_large_object_allocator(),
661            plan.common().get_los(),
662        ));
663        vec.push((
664            if cfg!(feature = "marksweep_as_nonmoving") {
665                reserved.add_free_list_allocator()
666            } else if cfg!(feature = "immortal_as_nonmoving") {
667                reserved.add_bump_pointer_allocator()
668            } else {
669                reserved.add_immix_allocator()
670            },
671            plan.common().get_nonmoving(),
672        ));
673    }
674
675    reserved.validate();
676    vec
677}