mmtk/plan/
mutator_context.rs

//! Mutator context for each application thread.

use crate::plan::barriers::Barrier;
use crate::plan::global::Plan;
use crate::plan::AllocationSemantics;
use crate::policy::space::Space;
use crate::util::alloc::allocator::AllocationOptions;
use crate::util::alloc::allocators::{AllocatorSelector, Allocators};
use crate::util::alloc::Allocator;
use crate::util::{Address, ObjectReference};
use crate::util::{VMMutatorThread, VMWorkerThread};
use crate::vm::VMBinding;
use crate::MMTK;

use enum_map::EnumMap;

use super::barriers::NoBarrier;

pub(crate) type SpaceMapping<VM> = Vec<(AllocatorSelector, &'static dyn Space<VM>)>;

/// A place-holder implementation for `MutatorConfig::prepare_func` that should not be called.
/// It is most often used by plans that set `PlanConstraints::needs_prepare_mutator` to
/// `false`. It is also used by `NoGC` because it must not trigger GC.
pub(crate) fn unreachable_prepare_func<VM: VMBinding>(
    _mutator: &mut Mutator<VM>,
    _tls: VMWorkerThread,
) {
    unreachable!("`MutatorConfig::prepare_func` must not be called for the current plan.")
}

/// A mutator prepare implementation for plans that use [`crate::plan::global::CommonPlan`].
#[allow(unused_variables)]
pub(crate) fn common_prepare_func<VM: VMBinding>(mutator: &mut Mutator<VM>, _tls: VMWorkerThread) {
    // Prepare the free list allocator used for the non-moving space.
    #[cfg(feature = "marksweep_as_nonmoving")]
    unsafe {
        mutator.allocator_impl_mut_for_semantic::<crate::util::alloc::FreeListAllocator<VM>>(
            AllocationSemantics::NonMoving,
        )
    }
    .prepare();
}

/// A place-holder implementation for `MutatorConfig::release_func` that should not be called.
/// Currently only used by `NoGC`.
pub(crate) fn unreachable_release_func<VM: VMBinding>(
    _mutator: &mut Mutator<VM>,
    _tls: VMWorkerThread,
) {
    unreachable!("`MutatorConfig::release_func` must not be called for the current plan.")
}

/// A mutator release implementation for plans that use [`crate::plan::global::CommonPlan`].
#[allow(unused_variables)]
pub(crate) fn common_release_func<VM: VMBinding>(mutator: &mut Mutator<VM>, _tls: VMWorkerThread) {
    cfg_if::cfg_if! {
        if #[cfg(feature = "marksweep_as_nonmoving")] {
            // Release the free list allocator used for the non-moving space.
            unsafe { mutator.allocator_impl_mut_for_semantic::<crate::util::alloc::FreeListAllocator<VM>>(
                AllocationSemantics::NonMoving,
            )}.release();
        } else if #[cfg(feature = "immortal_as_nonmoving")] {
            // Do nothing for the bump pointer allocator.
        } else {
            // Reset the Immix allocator.
            unsafe { mutator.allocator_impl_mut_for_semantic::<crate::util::alloc::ImmixAllocator<VM>>(
                AllocationSemantics::NonMoving,
            )}.reset();
        }
    }
}

/// A place-holder implementation for `MutatorConfig::release_func` that does nothing.
#[allow(dead_code)]
pub(crate) fn no_op_release_func<VM: VMBinding>(_mutator: &mut Mutator<VM>, _tls: VMWorkerThread) {}

// This struct is part of the Mutator struct.
// We are trying to make it fixed-size so that VM bindings can easily define a Mutator type with the exact same layout as our Mutator struct.
#[repr(C)]
pub struct MutatorConfig<VM: VMBinding> {
    /// Mapping between allocation semantics and allocator selector
    pub allocator_mapping: &'static EnumMap<AllocationSemantics, AllocatorSelector>,
    /// Mapping between allocator selector and spaces. Each pair represents a mapping.
    /// Put this behind a box, so it is a pointer-sized field.
    #[allow(clippy::box_collection)]
    pub space_mapping: Box<SpaceMapping<VM>>,
    /// Plan-specific code for mutator prepare. The VMWorkerThread is the worker thread that executes this prepare function.
    pub prepare_func: &'static (dyn Fn(&mut Mutator<VM>, VMWorkerThread) + Send + Sync),
    /// Plan-specific code for mutator release. The VMWorkerThread is the worker thread that executes this release function.
    pub release_func: &'static (dyn Fn(&mut Mutator<VM>, VMWorkerThread) + Send + Sync),
}

impl<VM: VMBinding> std::fmt::Debug for MutatorConfig<VM> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str("MutatorConfig:\n")?;
        f.write_str("Semantics mapping:\n")?;
        for (semantic, selector) in self.allocator_mapping.iter() {
            let space_name: &str = match self
                .space_mapping
                .iter()
                .find(|(selector_to_find, _)| selector_to_find == selector)
            {
                Some((_, space)) => space.name(),
                None => "!!!missing space here!!!",
            };
            f.write_fmt(format_args!(
                "- {:?} = {:?} ({:?})\n",
                semantic, selector, space_name
            ))?;
        }
        f.write_str("Space mapping:\n")?;
        for (selector, space) in self.space_mapping.iter() {
            f.write_fmt(format_args!("- {:?} = {:?}\n", selector, space.name()))?;
        }
        Ok(())
    }
}

/// Used to build a mutator struct.
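///
/// # Example
///
/// A minimal sketch of how a binding might construct a mutator. The `config`
/// value and `some_barrier` are illustrative placeholders, not the code of any
/// actual plan:
///
/// ```ignore
/// let mutator = MutatorBuilder::new(mutator_tls, mmtk, config)
///     // Optional: replace the default `NoBarrier` with the plan's barrier.
///     .barrier(Box::new(some_barrier))
///     .build();
/// ```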
pub struct MutatorBuilder<VM: VMBinding> {
    barrier: Box<dyn Barrier<VM>>,
    /// The mutator thread that is bound with this Mutator struct.
    mutator_tls: VMMutatorThread,
    mmtk: &'static MMTK<VM>,
    config: MutatorConfig<VM>,
}

impl<VM: VMBinding> MutatorBuilder<VM> {
    pub fn new(
        mutator_tls: VMMutatorThread,
        mmtk: &'static MMTK<VM>,
        config: MutatorConfig<VM>,
    ) -> Self {
        MutatorBuilder {
            barrier: Box::new(NoBarrier),
            mutator_tls,
            mmtk,
            config,
        }
    }

    pub fn barrier(mut self, barrier: Box<dyn Barrier<VM>>) -> Self {
        self.barrier = barrier;
        self
    }

    pub fn build(self) -> Mutator<VM> {
        Mutator {
            allocators: Allocators::<VM>::new(
                self.mutator_tls,
                self.mmtk,
                &self.config.space_mapping,
            ),
            barrier: self.barrier,
            mutator_tls: self.mutator_tls,
            plan: self.mmtk.get_plan(),
            config: self.config,
        }
    }
}

/// A mutator is a per-thread data structure that manages allocations and barriers. It is usually highly coupled with the language VM.
/// It is recommended for MMTk users 1) to have a mutator struct of the same layout in the thread-local storage that can be accessed efficiently,
/// and 2) to implement fastpath allocation and barriers for the mutator on the VM side.
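///
/// # Example
///
/// A sketch of recommendation 1) above: the binding keeps a raw pointer to this
/// thread's `Mutator` in thread-local storage for fast access (`MyVM` is a
/// placeholder for the binding's `VMBinding` type):
///
/// ```ignore
/// use std::cell::Cell;
/// thread_local! {
///     // Set when the thread is bound via `memory_manager::bind_mutator`.
///     static MUTATOR: Cell<*mut Mutator<MyVM>> = Cell::new(std::ptr::null_mut());
/// }
/// ```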
// We are trying to make this struct fixed-size so that VM bindings can easily define a type to have the exact same layout as this struct.
// Currently Mutator is fixed-size, and we should try to keep these invariants:
// - Allocators are fixed-length arrays of allocators.
// - MutatorConfig only has pointers/refs (including fat pointers), and is fixed-size.
#[repr(C)]
pub struct Mutator<VM: VMBinding> {
    pub(crate) allocators: Allocators<VM>,
    /// Holds some thread-local states for the barrier.
    pub barrier: Box<dyn Barrier<VM>>,
    /// The mutator thread that is bound with this Mutator struct.
    pub mutator_tls: VMMutatorThread,
    pub(crate) plan: &'static dyn Plan<VM = VM>,
    pub(crate) config: MutatorConfig<VM>,
}

impl<VM: VMBinding> MutatorContext<VM> for Mutator<VM> {
    fn prepare(&mut self, tls: VMWorkerThread) {
        (*self.config.prepare_func)(self, tls)
    }
    fn release(&mut self, tls: VMWorkerThread) {
        (*self.config.release_func)(self, tls)
    }

    // Note that this method is slow, and we expect VM bindings that care about performance to implement the allocation fastpath sequence in their bindings.
    fn alloc(
        &mut self,
        size: usize,
        align: usize,
        offset: usize,
        allocator: AllocationSemantics,
    ) -> Address {
        let allocator = unsafe {
            self.allocators
                .get_allocator_mut(self.config.allocator_mapping[allocator])
        };
        // The allocation options should be default/unset at the beginning of an allocation request.
        debug_assert!(allocator.get_context().get_alloc_options().is_default());
        allocator.alloc(size, align, offset)
    }

    fn alloc_with_options(
        &mut self,
        size: usize,
        align: usize,
        offset: usize,
        allocator: AllocationSemantics,
        options: AllocationOptions,
    ) -> Address {
        let allocator = unsafe {
            self.allocators
                .get_allocator_mut(self.config.allocator_mapping[allocator])
        };
        // The allocation options should be default/unset at the beginning of an allocation request.
        debug_assert!(allocator.get_context().get_alloc_options().is_default());
        allocator.alloc_with_options(size, align, offset, options)
    }

    fn alloc_slow(
        &mut self,
        size: usize,
        align: usize,
        offset: usize,
        allocator: AllocationSemantics,
    ) -> Address {
        let allocator = unsafe {
            self.allocators
                .get_allocator_mut(self.config.allocator_mapping[allocator])
        };
        // The allocation options should be default/unset at the beginning of an allocation request.
        debug_assert!(allocator.get_context().get_alloc_options().is_default());
        allocator.alloc_slow(size, align, offset)
    }

    fn alloc_slow_with_options(
        &mut self,
        size: usize,
        align: usize,
        offset: usize,
        allocator: AllocationSemantics,
        options: AllocationOptions,
    ) -> Address {
        let allocator = unsafe {
            self.allocators
                .get_allocator_mut(self.config.allocator_mapping[allocator])
        };
        // The allocation options should be default/unset at the beginning of an allocation request.
        debug_assert!(allocator.get_context().get_alloc_options().is_default());
        allocator.alloc_slow_with_options(size, align, offset, options)
    }

    // Note that this method is slow, and we expect VM bindings that care about performance to implement the post-allocation fastpath sequence in their bindings.
    fn post_alloc(
        &mut self,
        refer: ObjectReference,
        _bytes: usize,
        allocator: AllocationSemantics,
    ) {
        unsafe {
            self.allocators
                .get_allocator_mut(self.config.allocator_mapping[allocator])
        }
        .get_space()
        .initialize_object_metadata(refer)
    }

    fn get_tls(&self) -> VMMutatorThread {
        self.mutator_tls
    }

    fn barrier(&mut self) -> &mut dyn Barrier<VM> {
        &mut *self.barrier
    }
}

impl<VM: VMBinding> Mutator<VM> {
    /// Get all the valid allocator selectors (deduplicated).
    fn get_all_allocator_selectors(&self) -> Vec<AllocatorSelector> {
        use itertools::Itertools;
        self.config
            .allocator_mapping
            .iter()
            .map(|(_, selector)| *selector)
            .sorted()
            .dedup()
            .filter(|selector| *selector != AllocatorSelector::None)
            .collect()
    }

    /// Inform each allocator that the mutator is being destroyed, calling allocator-specific on-destroy methods.
    pub fn on_destroy(&mut self) {
        for selector in self.get_all_allocator_selectors() {
            unsafe { self.allocators.get_allocator_mut(selector) }.on_mutator_destroy();
        }
    }

    /// Get the allocator for the selector.
    ///
    /// # Safety
    /// The selector needs to be valid, and point to an allocator that has been initialized.
    /// [`crate::memory_manager::get_allocator_mapping`] can be used to get a selector.
    pub unsafe fn allocator(&self, selector: AllocatorSelector) -> &dyn Allocator<VM> {
        self.allocators.get_allocator(selector)
    }

    /// Get the mutable allocator for the selector.
    ///
    /// # Safety
    /// The selector needs to be valid, and point to an allocator that has been initialized.
    /// [`crate::memory_manager::get_allocator_mapping`] can be used to get a selector.
    pub unsafe fn allocator_mut(&mut self, selector: AllocatorSelector) -> &mut dyn Allocator<VM> {
        self.allocators.get_allocator_mut(selector)
    }

    /// Get the allocator of a concrete type for the selector.
    ///
    /// # Safety
    /// The selector needs to be valid, and point to an allocator that has been initialized.
    /// [`crate::memory_manager::get_allocator_mapping`] can be used to get a selector.
    pub unsafe fn allocator_impl<T: Allocator<VM>>(&self, selector: AllocatorSelector) -> &T {
        self.allocators.get_typed_allocator(selector)
    }

    /// Get the mutable allocator of a concrete type for the selector.
    ///
    /// # Safety
    /// The selector needs to be valid, and point to an allocator that has been initialized.
    /// [`crate::memory_manager::get_allocator_mapping`] can be used to get a selector.
    pub unsafe fn allocator_impl_mut<T: Allocator<VM>>(
        &mut self,
        selector: AllocatorSelector,
    ) -> &mut T {
        self.allocators.get_typed_allocator_mut(selector)
    }

    /// Get the allocator of a concrete type for the semantic.
    ///
    /// # Safety
    /// The semantic needs to match the allocator type.
    pub unsafe fn allocator_impl_for_semantic<T: Allocator<VM>>(
        &self,
        semantic: AllocationSemantics,
    ) -> &T {
        self.allocator_impl::<T>(self.config.allocator_mapping[semantic])
    }

    /// Get the mutable allocator of a concrete type for the semantic.
    ///
    /// # Safety
    /// The semantic needs to match the allocator type.
    pub unsafe fn allocator_impl_mut_for_semantic<T: Allocator<VM>>(
        &mut self,
        semantic: AllocationSemantics,
    ) -> &mut T {
        self.allocator_impl_mut::<T>(self.config.allocator_mapping[semantic])
    }

    /// Return the base offset from a mutator pointer to the allocator specified by the selector.
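    ///
    /// # Example
    ///
    /// A sketch of how a binding might use this offset in its allocation fastpath,
    /// assuming the default semantic maps to a bump pointer allocator; `MyVM` and
    /// `mutator_ptr` (a raw pointer to the thread-local mutator) are illustrative:
    ///
    /// ```ignore
    /// let selector = memory_manager::get_allocator_mapping(mmtk, AllocationSemantics::Default);
    /// let offset = Mutator::<MyVM>::get_allocator_base_offset(selector);
    /// let allocator = (mutator_ptr as usize + offset) as *mut BumpAllocator<MyVM>;
    /// ```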
    pub fn get_allocator_base_offset(selector: AllocatorSelector) -> usize {
        use crate::util::alloc::*;
        use memoffset::offset_of;
        use std::mem::size_of;
        offset_of!(Mutator<VM>, allocators)
            + match selector {
                AllocatorSelector::BumpPointer(index) => {
                    offset_of!(Allocators<VM>, bump_pointer)
                        + size_of::<BumpAllocator<VM>>() * index as usize
                }
                AllocatorSelector::FreeList(index) => {
                    offset_of!(Allocators<VM>, free_list)
                        + size_of::<FreeListAllocator<VM>>() * index as usize
                }
                AllocatorSelector::Immix(index) => {
                    offset_of!(Allocators<VM>, immix)
                        + size_of::<ImmixAllocator<VM>>() * index as usize
                }
                AllocatorSelector::LargeObject(index) => {
                    offset_of!(Allocators<VM>, large_object)
                        + size_of::<LargeObjectAllocator<VM>>() * index as usize
                }
                AllocatorSelector::Malloc(index) => {
                    offset_of!(Allocators<VM>, malloc)
                        + size_of::<MallocAllocator<VM>>() * index as usize
                }
                AllocatorSelector::MarkCompact(index) => {
                    offset_of!(Allocators<VM>, markcompact)
                        + size_of::<MarkCompactAllocator<VM>>() * index as usize
                }
                AllocatorSelector::None => panic!("Expect a valid AllocatorSelector, found None"),
            }
    }
}

/// Each GC plan should provide its implementation of a MutatorContext. *Note that this trait is no longer needed, as we have removed
/// per-plan mutator implementations, and we will remove this trait as well in the future.*
// TODO: We should be able to remove this trait, as we removed per-plan mutator implementations, and there is no other type that implements this trait.
// The Mutator struct above is the only type that implements this trait. We should be able to merge them.
pub trait MutatorContext<VM: VMBinding>: Send + 'static {
    /// Do the prepare work for this mutator.
    fn prepare(&mut self, tls: VMWorkerThread);
    /// Do the release work for this mutator.
    fn release(&mut self, tls: VMWorkerThread);
    /// Allocate memory for an object. This function will trigger a GC on failed allocation.
    ///
    /// Arguments:
    /// * `size`: the number of bytes required for the object.
    /// * `align`: required alignment for the object.
    /// * `offset`: offset associated with the alignment. The result plus the offset will be aligned to the given alignment.
    /// * `allocator`: the allocation semantic used for this object.
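    ///
    /// # Example
    ///
    /// A minimal sketch of the slow, general-purpose allocation sequence. The conversion
    /// from the returned `Address` to an `ObjectReference` is binding-specific, so
    /// `address_to_object` is a hypothetical binding helper:
    ///
    /// ```ignore
    /// let addr = mutator.alloc(16, 8, 0, AllocationSemantics::Default);
    /// let object = address_to_object(addr); // hypothetical binding helper
    /// mutator.post_alloc(object, 16, AllocationSemantics::Default);
    /// ```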
    fn alloc(
        &mut self,
        size: usize,
        align: usize,
        offset: usize,
        allocator: AllocationSemantics,
    ) -> Address;
    /// Allocate memory for an object, with more options to control this allocation request, e.g. not triggering a GC on failed allocation.
    ///
    /// Arguments:
    /// * `size`: the number of bytes required for the object.
    /// * `align`: required alignment for the object.
    /// * `offset`: offset associated with the alignment. The result plus the offset will be aligned to the given alignment.
    /// * `allocator`: the allocation semantic used for this object.
    /// * `options`: the allocation options to change the default allocation behavior for this request.
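    ///
    /// # Example
    ///
    /// A sketch of requesting an allocation that reports failure rather than triggering
    /// a GC. The exact fields of `AllocationOptions` are defined in
    /// `crate::util::alloc::allocator`; the `on_fail` field and `ReturnFailure` variant
    /// shown here are assumptions for illustration:
    ///
    /// ```ignore
    /// let options = AllocationOptions { on_fail: OnAllocationFail::ReturnFailure };
    /// let addr = mutator.alloc_with_options(16, 8, 0, AllocationSemantics::Default, options);
    /// if addr.is_zero() {
    ///     // Allocation failed; the binding decides how to respond (e.g. throw an OOM error).
    /// }
    /// ```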
    fn alloc_with_options(
        &mut self,
        size: usize,
        align: usize,
        offset: usize,
        allocator: AllocationSemantics,
        options: AllocationOptions,
    ) -> Address;
    /// The slow path allocation for [`MutatorContext::alloc`]. This function will trigger a GC on failed allocation.
    ///
    /// This is only useful when the binding
    /// implements the fast path allocation, and would like to explicitly
    /// call the slow path after the fast path allocation fails.
    fn alloc_slow(
        &mut self,
        size: usize,
        align: usize,
        offset: usize,
        allocator: AllocationSemantics,
    ) -> Address;
    /// The slow path allocation for [`MutatorContext::alloc_with_options`].
    ///
    /// This is only useful when the binding
    /// implements the fast path allocation, and would like to explicitly
    /// call the slow path after the fast path allocation fails.
    fn alloc_slow_with_options(
        &mut self,
        size: usize,
        align: usize,
        offset: usize,
        allocator: AllocationSemantics,
        options: AllocationOptions,
    ) -> Address;
    /// Perform post-allocation actions. For many allocators none are
    /// required.
    ///
    /// Arguments:
    /// * `refer`: the newly allocated object.
    /// * `bytes`: the size of the space allocated (in bytes).
    /// * `allocator`: the allocation semantic used.
    fn post_alloc(&mut self, refer: ObjectReference, bytes: usize, allocator: AllocationSemantics);
    /// Flush per-mutator remembered sets and create GC work for the remembered sets.
    fn flush_remembered_sets(&mut self) {
        self.barrier().flush();
    }
    /// Flush the mutator context.
    fn flush(&mut self) {
        self.flush_remembered_sets();
    }
    /// Get the mutator thread for this mutator context. This is the same value as the argument supplied in
    /// [`crate::memory_manager::bind_mutator`] when this mutator is created.
    fn get_tls(&self) -> VMMutatorThread;
    /// Get the active barrier trait object.
    fn barrier(&mut self) -> &mut dyn Barrier<VM>;
}

/// This is used by plans to indicate the number of allocators reserved for the plan.
/// It is used as a parameter for creating the allocator/space mapping.
/// A plan is required to reserve the first few allocators. For example, if `n_bump_pointer` is 1,
/// the first bump pointer allocator is reserved for the plan (and the plan should
/// initialize its mapping for it itself), and the spaces in the common/base plan will use the following bump
/// pointer allocators.
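///
/// # Example
///
/// A sketch of a plan reserving one bump pointer allocator for its own space (the
/// constant name and field values are illustrative):
///
/// ```ignore
/// const RESERVED: ReservedAllocators = ReservedAllocators {
///     n_bump_pointer: 1,
///     ..ReservedAllocators::DEFAULT
/// };
/// ```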
#[allow(dead_code)]
#[derive(Default)]
pub(crate) struct ReservedAllocators {
    pub n_bump_pointer: u8,
    pub n_large_object: u8,
    pub n_malloc: u8,
    pub n_immix: u8,
    pub n_mark_compact: u8,
    pub n_free_list: u8,
}

impl ReservedAllocators {
    pub const DEFAULT: Self = ReservedAllocators {
        n_bump_pointer: 0,
        n_large_object: 0,
        n_malloc: 0,
        n_immix: 0,
        n_mark_compact: 0,
        n_free_list: 0,
    };
    /// Check that the number of each kind of allocator is within bounds. Panics if any allocator count exceeds the max allowed.
    fn validate(&self) {
        use crate::util::alloc::allocators::*;
        assert!(
            self.n_bump_pointer as usize <= MAX_BUMP_ALLOCATORS,
            "Allocator mapping declared more bump pointer allocators than the max allowed."
        );
        assert!(
            self.n_large_object as usize <= MAX_LARGE_OBJECT_ALLOCATORS,
            "Allocator mapping declared more large object allocators than the max allowed."
        );
        assert!(
            self.n_malloc as usize <= MAX_MALLOC_ALLOCATORS,
            "Allocator mapping declared more malloc allocators than the max allowed."
        );
        assert!(
            self.n_immix as usize <= MAX_IMMIX_ALLOCATORS,
            "Allocator mapping declared more immix allocators than the max allowed."
        );
        assert!(
            self.n_mark_compact as usize <= MAX_MARK_COMPACT_ALLOCATORS,
            "Allocator mapping declared more mark compact allocators than the max allowed."
        );
        assert!(
            self.n_free_list as usize <= MAX_FREE_LIST_ALLOCATORS,
            "Allocator mapping declared more free list allocators than the max allowed."
        );
    }

    // We may add more allocators from common/base plan after reserved allocators.

    fn add_bump_pointer_allocator(&mut self) -> AllocatorSelector {
        let selector = AllocatorSelector::BumpPointer(self.n_bump_pointer);
        self.n_bump_pointer += 1;
        selector
    }
    fn add_large_object_allocator(&mut self) -> AllocatorSelector {
        let selector = AllocatorSelector::LargeObject(self.n_large_object);
        self.n_large_object += 1;
        selector
    }
    #[allow(dead_code)]
    fn add_malloc_allocator(&mut self) -> AllocatorSelector {
        let selector = AllocatorSelector::Malloc(self.n_malloc);
        self.n_malloc += 1;
        selector
    }
    #[allow(dead_code)]
    fn add_immix_allocator(&mut self) -> AllocatorSelector {
        let selector = AllocatorSelector::Immix(self.n_immix);
        self.n_immix += 1;
        selector
    }
    #[allow(dead_code)]
    fn add_mark_compact_allocator(&mut self) -> AllocatorSelector {
        let selector = AllocatorSelector::MarkCompact(self.n_mark_compact);
        self.n_mark_compact += 1;
        selector
    }
    #[allow(dead_code)]
    fn add_free_list_allocator(&mut self) -> AllocatorSelector {
        let selector = AllocatorSelector::FreeList(self.n_free_list);
        self.n_free_list += 1;
        selector
    }
}

/// Create an allocator mapping for the spaces in the common/base plan. A plan should reserve its own allocators before calling this.
///
/// # Arguments
/// * `reserved`: the numbers of allocators reserved for the plan-specific policies.
/// * `include_common_plan`: whether the plan uses the common plan. If a plan uses `CommonPlan`, we will initialize the allocator mapping for spaces in `CommonPlan`.
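///
/// # Example
///
/// A sketch of how a plan typically builds its full mapping: let this function assign
/// the common/base spaces, then fill in its reserved slots. The `RESERVED` constant and
/// the `Default` assignment are illustrative:
///
/// ```ignore
/// lazy_static! {
///     pub static ref ALLOCATOR_MAPPING: EnumMap<AllocationSemantics, AllocatorSelector> = {
///         let mut map = create_allocator_mapping(RESERVED, true);
///         map[AllocationSemantics::Default] = AllocatorSelector::BumpPointer(0);
///         map
///     };
/// }
/// ```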
pub(crate) fn create_allocator_mapping(
    mut reserved: ReservedAllocators,
    include_common_plan: bool,
) -> EnumMap<AllocationSemantics, AllocatorSelector> {
    // If we need to add new allocators, or new spaces, we need to make sure the allocator we assign here matches the allocator
    // we used in create_space_mapping(). The easiest way is to add the space/allocator mapping in the same order. So for any modification to this
    // function, please check the other function.

    let mut map = EnumMap::<AllocationSemantics, AllocatorSelector>::default();

    // spaces in base plan

    #[cfg(feature = "code_space")]
    {
        map[AllocationSemantics::Code] = reserved.add_bump_pointer_allocator();
        map[AllocationSemantics::LargeCode] = reserved.add_bump_pointer_allocator();
    }

    #[cfg(feature = "ro_space")]
    {
        map[AllocationSemantics::ReadOnly] = reserved.add_bump_pointer_allocator();
    }

    // spaces in common plan

    if include_common_plan {
        map[AllocationSemantics::Immortal] = reserved.add_bump_pointer_allocator();
        map[AllocationSemantics::Los] = reserved.add_large_object_allocator();
        map[AllocationSemantics::NonMoving] = if cfg!(feature = "marksweep_as_nonmoving") {
            reserved.add_free_list_allocator()
        } else if cfg!(feature = "immortal_as_nonmoving") {
            reserved.add_bump_pointer_allocator()
        } else {
            reserved.add_immix_allocator()
        };
    }

    reserved.validate();
    map
}

/// Create a space mapping for the spaces in the common/base plan. A plan should reserve its own allocators before calling this.
///
/// # Arguments
/// * `reserved`: the numbers of allocators reserved for the plan-specific policies.
/// * `include_common_plan`: whether the plan uses the common plan. If a plan uses `CommonPlan`, we will initialize the space mapping for spaces in `CommonPlan`.
/// * `plan`: the reference to the plan.
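///
/// # Example
///
/// A sketch of a plan creating its space mapping and then appending its own reserved
/// space; `RESERVED` and the `my_space` field are illustrative:
///
/// ```ignore
/// let mut vec = create_space_mapping(RESERVED, true, plan);
/// vec.push((AllocatorSelector::BumpPointer(0), &plan.my_space));
/// ```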
pub(crate) fn create_space_mapping<VM: VMBinding>(
    mut reserved: ReservedAllocators,
    include_common_plan: bool,
    plan: &'static dyn Plan<VM = VM>,
) -> Vec<(AllocatorSelector, &'static dyn Space<VM>)> {
    // If we need to add new allocators, or new spaces, we need to make sure the allocator we assign here matches the allocator
    // we used in create_allocator_mapping(). The easiest way is to add the space/allocator mapping in the same order. So for any modification to this
    // function, please check the other function.

    let mut vec: Vec<(AllocatorSelector, &'static dyn Space<VM>)> = vec![];

    // spaces in BasePlan

    #[cfg(feature = "code_space")]
    {
        vec.push((
            reserved.add_bump_pointer_allocator(),
            &plan.base().code_space,
        ));
        vec.push((
            reserved.add_bump_pointer_allocator(),
            &plan.base().code_lo_space,
        ));
    }

    #[cfg(feature = "ro_space")]
    vec.push((reserved.add_bump_pointer_allocator(), &plan.base().ro_space));

    // spaces in CommonPlan

    if include_common_plan {
        vec.push((
            reserved.add_bump_pointer_allocator(),
            plan.common().get_immortal(),
        ));
        vec.push((
            reserved.add_large_object_allocator(),
            plan.common().get_los(),
        ));
        vec.push((
            if cfg!(feature = "marksweep_as_nonmoving") {
                reserved.add_free_list_allocator()
            } else if cfg!(feature = "immortal_as_nonmoving") {
                reserved.add_bump_pointer_allocator()
            } else {
                reserved.add_immix_allocator()
            },
            plan.common().get_nonmoving(),
        ));
    }

    reserved.validate();
    vec
}