mmtk/policy/
space.rs

1use crate::global_state::GlobalState;
2use crate::plan::PlanConstraints;
3use crate::scheduler::GCWorkScheduler;
4use crate::util::conversions::*;
5use crate::util::metadata::side_metadata::{
6    SideMetadataContext, SideMetadataSanity, SideMetadataSpec,
7};
8use crate::util::object_enum::ObjectEnumerator;
9use crate::util::Address;
10use crate::util::ObjectReference;
11
12use crate::util::heap::layout::vm_layout::{vm_layout, LOG_BYTES_IN_CHUNK};
13use crate::util::heap::{PageResource, VMRequest};
14use crate::util::options::Options;
15use crate::vm::{ActivePlan, Collection};
16
17use crate::util::constants::{LOG_BYTES_IN_MBYTE, LOG_BYTES_IN_PAGE};
18use crate::util::conversions;
19use crate::util::opaque_pointer::*;
20
21use crate::mmtk::SFT_MAP;
22#[cfg(debug_assertions)]
23use crate::policy::sft::EMPTY_SFT_NAME;
24use crate::policy::sft::SFT;
25use crate::util::alloc::allocator::AllocationOptions;
26use crate::util::copy::*;
27use crate::util::heap::gc_trigger::GCTrigger;
28use crate::util::heap::layout::vm_layout::BYTES_IN_CHUNK;
29use crate::util::heap::layout::Mmapper;
30use crate::util::heap::layout::VMMap;
31use crate::util::heap::space_descriptor::SpaceDescriptor;
32use crate::util::heap::HeapMeta;
33use crate::util::os::*;
34use crate::vm::VMBinding;
35
36use std::marker::PhantomData;
37use std::sync::atomic::AtomicBool;
38use std::sync::Arc;
39use std::sync::Mutex;
40
41use downcast_rs::Downcast;
42
/// A space is a region of virtual memory managed by a single policy.  This trait defines
/// the operations shared by all spaces: acquiring pages from the underlying page
/// resource, mmapping memory and side metadata, keeping the SFT map up to date, and
/// reporting page usage for GC triggering.
pub trait Space<VM: VMBinding>: 'static + SFT + Sync + Downcast {
    /// Upcast to a `&dyn Space<VM>` trait object.
    fn as_space(&self) -> &dyn Space<VM>;
    /// Upcast to a `&dyn SFT` trait object.
    fn as_sft(&self) -> &(dyn SFT + Sync + 'static);
    /// Get the page resource that manages the pages of this space.
    fn get_page_resource(&self) -> &dyn PageResource<VM>;

    /// Get a mutable reference to the underlying page resource, or `None` if the space does not
    /// have a page resource.
    fn maybe_get_page_resource_mut(&mut self) -> Option<&mut dyn PageResource<VM>>;

    /// Initialize entries in SFT map for the space. This is called when the Space object
    /// has a non-moving address, as we will use the address to set sft.
    /// Currently after we create a boxed plan, spaces in the plan have a non-moving address.
    fn initialize_sft(&self, sft_map: &mut dyn crate::policy::sft_map::SFTMap);

    /// A check for the obvious out-of-memory case: if the requested size is larger than
    /// the heap size, it is definitely an OOM. We would like to identify that, and
    /// allow the binding to deal with OOM. Without this check, we will attempt
    /// to allocate from the page resource. If the requested size is unrealistically large
    /// (such as `usize::MAX`), it breaks the assumptions of our implementation of
    /// page resource, vm map, etc. This check prevents that, and allows us to
    /// handle the OOM case.
    /// Each allocator that may request an arbitrary size should call this method before
    /// acquiring memory from the space. For example, bump pointer allocator and large object
    /// allocator need to call this method. On the other hand, allocators that only allocate
    /// memory in fixed size blocks do not need to call this method.
    /// An allocator should call this method before doing any computation on the size to
    /// avoid arithmetic overflow. If we have to do computation in the allocation fastpath and
    /// overflow happens there, there is nothing we can do about it.
    /// Return a boolean to indicate if we will be out of memory, determined by the check.
    fn will_oom_on_acquire(&self, size: usize) -> bool {
        let max_pages = self.get_gc_trigger().policy.get_max_heap_size_in_pages();
        // The shift rounds `size` down to whole pages, which is acceptable for this
        // coarse check because the threshold is the entire heap size.
        let requested_pages = size >> LOG_BYTES_IN_PAGE;
        requested_pages > max_pages
    }

    /// Check if the requested `size` is an obvious out-of-memory case using
    /// [`Self::will_oom_on_acquire`] and, if it is, call `Collection::out_of_memory`.  Return the
    /// result of `will_oom_on_acquire`.
    fn handle_obvious_oom_request(
        &self,
        tls: VMThread,
        size: usize,
        alloc_options: AllocationOptions,
    ) -> bool {
        if self.will_oom_on_acquire(size) {
            // Only call back into the binding if the allocation request allows it.
            if alloc_options.allow_oom_call {
                VM::VMCollection::out_of_memory(
                    tls,
                    crate::util::alloc::AllocationError::HeapOutOfMemory,
                );
            }
            return true;
        }
        false
    }

    /// Acquire `pages` pages from this space's page resource, polling the GC trigger
    /// first if the calling thread is a mutator.  Returns the start address of the
    /// acquired region, or `Address::ZERO` if the pages were not acquired (a GC was
    /// triggered without over-commit allowed, or the page resource failed).
    fn acquire(&self, tls: VMThread, pages: usize, alloc_options: AllocationOptions) -> Address {
        trace!(
            "Space.acquire, tls={:?}, alloc_options={:?}",
            tls,
            alloc_options
        );

        // The caller should have rejected obviously-oversized requests already.
        debug_assert!(
            !self.will_oom_on_acquire(pages << LOG_BYTES_IN_PAGE),
            "The requested pages is larger than the max heap size. Is will_go_oom_on_acquire used before acquring memory?"
        );

        trace!("Reserving pages");
        let pr = self.get_page_resource();
        let pages_reserved = pr.reserve_pages(pages);
        trace!("Pages reserved");

        // Should we poll before acquiring pages from page resources so that it can trigger a GC?
        // - If tls is collector, we cannot attempt a GC.
        let should_poll = VM::VMActivePlan::is_mutator(tls);

        // If we should poll, do it now.  Record if it has triggered a GC.
        // If we should not poll, GC is not triggered.
        let gc_triggered = should_poll && {
            trace!("Polling ..");
            self.get_gc_trigger().poll(false, Some(self.as_space()))
        };

        // We can try to get pages if
        // - GC is not triggered, or
        // - GC is triggered, but we allow over-committing.
        let should_get_pages = !gc_triggered || alloc_options.allow_overcommit;

        // Get new pages if we should. If we didn't get new pages from the page resource for any
        // reason (if we decided not to, or if we tried and failed), this function shall return a
        // null address.
        if should_get_pages {
            if let Some(addr) = self.get_new_pages_and_initialize(tls, pages, pr, pages_reserved) {
                addr
            } else {
                self.not_acquiring(tls, alloc_options, pr, pages_reserved, true);
                Address::ZERO
            }
        } else {
            self.not_acquiring(tls, alloc_options, pr, pages_reserved, false);
            Address::ZERO
        }
    }

    /// Get new pages from the page resource, and do necessary initialization, including mmapping
    /// and zeroing the memory.
    ///
    /// The caller must have reserved pages from the page resource.  If successfully acquired pages
    /// from the page resource, the reserved pages will be committed.
    ///
    /// Returns `None` if failed to acquire memory from the page resource.  The caller should call
    /// `pr.clear_request`.
    fn get_new_pages_and_initialize(
        &self,
        tls: VMThread,
        pages: usize,
        pr: &dyn PageResource<VM>,
        pages_reserved: usize,
    ) -> Option<Address> {
        // We need this lock: Otherwise, it is possible that one thread acquires pages in a new chunk, but not yet
        // set SFT for it (in grow_space()), and another thread acquires pages in the same chunk, which is not
        // a new chunk so grow_space() won't be called on it. The second thread could return a result in the chunk before
        // its SFT is properly set.
        // We need to minimize the scope of this lock for performance when we have many threads (mutator threads, or GC threads with copying allocators).
        // See: https://github.com/mmtk/mmtk-core/issues/610
        let lock = self.common().acquire_lock.lock().unwrap();

        let Ok(res) = pr.get_new_pages(self.common().descriptor, pages_reserved, pages, tls) else {
            return None;
        };

        debug!(
            "Got new pages {} ({} pages) for {} in chunk {}, new_chunk? {}",
            res.start,
            res.pages,
            self.get_name(),
            conversions::chunk_align_down(res.start),
            res.new_chunk
        );
        let bytes = conversions::pages_to_bytes(res.pages);

        let mmap = || {
            // Mmap the pages and the side metadata, and handle error. In case of any error,
            // we will either call back to the VM for OOM, or simply panic.
            if let Err(mmap_error) = self
                .common()
                .mmapper
                .ensure_mapped(
                    res.start,
                    res.pages,
                    if *self.common().options.transparent_hugepages {
                        HugePageSupport::TransparentHugePages
                    } else {
                        HugePageSupport::No
                    },
                    self.common().mmap_protection(),
                    &MmapAnnotation::Space {
                        name: self.get_name(),
                    },
                )
                .and(self.common().metadata.try_map_metadata_space(
                    res.start,
                    bytes,
                    self.get_name(),
                ))
            {
                OS::handle_mmap_error::<VM>(mmap_error, tls);
            }
        };
        let grow_space = || {
            self.grow_space(res.start, bytes, res.new_chunk);
        };

        // The scope of the lock is important in terms of performance when we have many allocator threads.
        if SFT_MAP.get_side_metadata().is_some() {
            // If the SFT map uses side metadata, we have to initialize side metadata first.
            mmap();
            // then grow space, which will use the side metadata we mapped above
            grow_space();
            // then we can drop the lock after grow_space()
            drop(lock);
        } else {
            // In normal cases, we can drop lock immediately after grow_space()
            grow_space();
            drop(lock);
            // and map side metadata without holding the lock
            mmap();
        }

        // TODO: Concurrent zeroing
        if self.common().zeroed {
            crate::util::memory::zero(res.start, bytes);
        }

        // Some assertions
        {
            // --- Assert the start of the allocated region ---
            // The start address SFT should be correct.
            debug_assert_eq!(SFT_MAP.get_checked(res.start).name(), self.get_name());
            // The start address is in our space.
            debug_assert!(self.address_in_space(res.start));
            // The descriptor should be correct.
            debug_assert_eq!(
                self.common().vm_map().get_descriptor_for_address(res.start),
                self.common().descriptor
            );

            // --- Assert the last byte in the allocated region ---
            let last_byte = res.start + bytes - 1;
            // The SFT for the last byte in the allocated memory should be correct.
            debug_assert_eq!(SFT_MAP.get_checked(last_byte).name(), self.get_name());
            // The last byte in the allocated memory should be in this space.
            debug_assert!(self.address_in_space(last_byte));
            // The descriptor for the last byte should be correct.
            debug_assert_eq!(
                self.common().vm_map().get_descriptor_for_address(last_byte),
                self.common().descriptor
            );
        }

        debug!("Space.acquire(), returned = {}", res.start);
        Some(res.start)
    }

    /// Handle the case where [`Space::acquire`] will not or can not acquire pages from the page
    /// resource.  This may happen when
    /// -   GC is triggered and the allocation does not allow over-committing, or
    /// -   the allocation tried to acquire pages from the page resource but ran out of physical
    ///     memory.
    fn not_acquiring(
        &self,
        tls: VMThread,
        alloc_options: AllocationOptions,
        pr: &dyn PageResource<VM>,
        pages_reserved: usize,
        attempted_allocation_and_failed: bool,
    ) {
        assert!(
            VM::VMActivePlan::is_mutator(tls),
            "A non-mutator thread failed to get pages from page resource.  \
            Copying GC plans should compute the copying headroom carefully to prevent this."
        );

        // Clear the request
        pr.clear_request(pages_reserved);

        // If we are not at a safepoint, return immediately.
        if !alloc_options.at_safepoint {
            return;
        }

        debug!("Collection required");

        if !self.common().global_state.is_initialized() {
            // Otherwise do GC here
            panic!(
                "GC is not allowed here: collection is not initialized \
                    (did you call initialize_collection()?).  \
                    Out of physical memory: {phy}",
                phy = attempted_allocation_and_failed
            );
        }

        if attempted_allocation_and_failed {
            // We thought we had memory to allocate, but somehow failed the allocation. Will force a GC.
            let gc_performed = self.get_gc_trigger().poll(true, Some(self.as_space()));
            debug_assert!(gc_performed, "GC not performed when forced.");
        }

        // Inform GC trigger about the pending allocation.
        let meta_pages_reserved = self.estimate_side_meta_pages(pages_reserved);
        let total_pages_reserved = pages_reserved + meta_pages_reserved;
        self.get_gc_trigger()
            .policy
            .on_pending_allocation(total_pages_reserved);

        VM::VMCollection::block_for_gc(VMMutatorThread(tls)); // We have checked that this is mutator
    }

    /// Return `true` if `start` falls within this space.  A discontiguous space consults
    /// the VM map; a contiguous space uses its fixed `[start, start + extent)` range.
    fn address_in_space(&self, start: Address) -> bool {
        if !self.common().descriptor.is_contiguous() {
            self.common().vm_map().get_descriptor_for_address(start) == self.common().descriptor
        } else {
            start >= self.common().start && start < self.common().start + self.common().extent
        }
    }

    /// Return `true` if the object's raw address falls within this space.
    fn in_space(&self, object: ObjectReference) -> bool {
        self.address_in_space(object.to_raw_address())
    }

    /// This is called after we get result from page resources.  The space may
    /// tap into the hook to monitor heap growth.  The call is made from within the
    /// page resources' critical region, immediately before yielding the lock.
    ///
    /// Arguments:
    /// * `start`: The start of the newly allocated space
    /// * `bytes`: The size of the newly allocated space
    /// * `new_chunk`: `true` if the new space encroached upon or started a new chunk or chunks.
    fn grow_space(&self, start: Address, bytes: usize, new_chunk: bool) {
        trace!(
            "Grow space from {} for {} bytes (new chunk = {})",
            start,
            bytes,
            new_chunk
        );

        // If this is not a new chunk, the SFT for [start, start + bytes) should already be initialized.
        #[cfg(debug_assertions)]
        if !new_chunk {
            debug_assert!(
                SFT_MAP.get_checked(start).name() != EMPTY_SFT_NAME,
                "In grow_space(start = {}, bytes = {}, new_chunk = {}), we have empty SFT entries (chunk for {} = {})",
                start,
                bytes,
                new_chunk,
                start,
                SFT_MAP.get_checked(start).name()
            );
            debug_assert!(
                SFT_MAP.get_checked(start + bytes - 1).name() != EMPTY_SFT_NAME,
                "In grow_space(start = {}, bytes = {}, new_chunk = {}), we have empty SFT entries (chunk for {} = {})",
                start,
                bytes,
                new_chunk,
                start + bytes - 1,
                SFT_MAP.get_checked(start + bytes - 1).name()
            );
        }

        if new_chunk {
            // SAFETY: per the SFT map's contract, updating entries for a newly acquired
            // chunk is done under the acquire lock (see get_new_pages_and_initialize).
            unsafe { SFT_MAP.update(self.as_sft(), start, bytes) };
        }
    }

    /// Estimate the amount of side metadata memory needed for a given data memory size in pages.
    /// The result will over-estimate the amount of metadata pages needed, with at least one page
    /// per side metadata.  This relatively accurately describes the number of side metadata pages
    /// the space actually consumes.
    ///
    /// This function is used for both triggering GC (via [`Space::reserved_pages`]) and resizing
    /// the heap (via [`crate::util::heap::GCTriggerPolicy::on_pending_allocation`]).
    fn estimate_side_meta_pages(&self, data_pages: usize) -> usize {
        self.common().metadata.calculate_reserved_pages(data_pages)
    }

    /// The number of pages this space reserves: its data pages plus the estimated
    /// side metadata pages needed to cover them.
    fn reserved_pages(&self) -> usize {
        let data_pages = self.get_page_resource().reserved_pages();
        let meta_pages = self.estimate_side_meta_pages(data_pages);
        data_pages + meta_pages
    }

    /// Return the number of physical pages available.
    fn available_physical_pages(&self) -> usize {
        self.get_page_resource().get_available_physical_pages()
    }

    /// The name of this space, as given at creation time.
    fn get_name(&self) -> &'static str {
        self.common().name
    }

    /// The descriptor of this space.
    fn get_descriptor(&self) -> SpaceDescriptor {
        self.common().descriptor
    }

    /// Access the fields shared by all spaces.
    fn common(&self) -> &CommonSpace<VM>;
    /// The GC trigger used to decide when this space's allocations should trigger a GC.
    fn get_gc_trigger(&self) -> &GCTrigger<VM> {
        self.common().gc_trigger.as_ref()
    }

    /// Release a multi-page allocation starting at `start` back to the space.
    fn release_multiple_pages(&mut self, start: Address);

    /// What copy semantic we should use for this space if we copy objects from this space.
    /// This is only needed for plans that use SFTProcessEdges
    fn set_copy_for_sft_trace(&mut self, _semantics: Option<CopySemantics>) {
        panic!("A copying space should override this method")
    }

    /// Ensure that the current space's metadata context does not have any issues.
    /// Panics with a suitable message if any issue is detected.
    /// It also initialises the sanity maps which will then be used if the `extreme_assertions` feature is active.
    /// Internally this calls verify_metadata_context() from `util::metadata::sanity`
    ///
    /// This function is called once per space by its parent plan but may be called multiple times per policy.
    ///
    /// Arguments:
    /// * `side_metadata_sanity_checker`: The `SideMetadataSanity` object instantiated in the calling plan.
    fn verify_side_metadata_sanity(&self, side_metadata_sanity_checker: &mut SideMetadataSanity) {
        side_metadata_sanity_checker
            .verify_metadata_context(std::any::type_name::<Self>(), &self.common().metadata)
    }

    /// Enumerate objects in the current space.
    ///
    /// Implementers can use the `enumerator` to report
    ///
    /// -   individual objects within the space using `enumerator.visit_object`, and
    /// -   ranges of address that may contain objects using `enumerator.visit_address_range`. The
    ///     caller will then enumerate objects in the range using the VO bits metadata.
    ///
    /// Each object in the space shall be covered by one of the two methods above.
    ///
    /// # Implementation considerations
    ///
    /// **Skipping empty ranges**: When enumerating address ranges, spaces can skip ranges (blocks,
    /// chunks, etc.) that are guaranteed not to contain objects.
    ///
    /// **Dynamic dispatch**: Because `Space` is a trait object type and `enumerator` is a `dyn`
    /// reference, invoking methods of `enumerator` involves a dynamic dispatching.  But the
    /// overhead is OK if we call it a block at a time because scanning the VO bits will dominate
    /// the execution time.  For LOS, it will be cheaper to enumerate individual objects than
    /// scanning VO bits because it is sparse.
    fn enumerate_objects(&self, enumerator: &mut dyn ObjectEnumerator);

    /// Set whether objects allocated in this space should be treated as live on allocation.
    fn set_allocate_as_live(&self, live: bool) {
        self.common()
            .allocate_as_live
            .store(live, std::sync::atomic::Ordering::SeqCst);
    }

    /// Query whether objects allocated in this space should be treated as live on allocation.
    // NOTE(review): the store above uses SeqCst while this load uses Acquire — confirm the
    // weaker load ordering is intended.
    fn should_allocate_as_live(&self) -> bool {
        self.common()
            .allocate_as_live
            .load(std::sync::atomic::Ordering::Acquire)
    }

    /// Clear the side log bits for allocated regions in this space.
    /// This method is only called if the plan knows the log bits are side metadata.
    fn clear_side_log_bits(&self);

    /// Set the side log bits for allocated regions in this space.
    /// This method is only called if the plan knows the log bits are side metadata.
    fn set_side_log_bits(&self);
}
479
480/// Print the VM map for a space.
481/// Space needs to be object-safe, so it cannot have methods that use extra generic type paramters. So this method is placed outside the Space trait.
482/// This method can be invoked on a &dyn Space (space.as_space() will return &dyn Space).
483#[allow(unused)]
484pub(crate) fn print_vm_map<VM: VMBinding>(
485    space: &dyn Space<VM>,
486    out: &mut impl std::fmt::Write,
487) -> Result<(), std::fmt::Error> {
488    let common = space.common();
489    write!(out, "{} ", common.name)?;
490    if common.immortal {
491        write!(out, "I")?;
492    } else {
493        write!(out, " ")?;
494    }
495    if common.movable {
496        write!(out, " ")?;
497    } else {
498        write!(out, "N")?;
499    }
500    write!(out, " ")?;
501    if common.contiguous {
502        write!(
503            out,
504            "{}->{}",
505            common.start,
506            common.start + common.extent - 1
507        )?;
508        match common.vmrequest {
509            VMRequest::Extent { extent, .. } => {
510                write!(out, " E {}", extent)?;
511            }
512            VMRequest::Fraction { frac, .. } => {
513                write!(out, " F {}", frac)?;
514            }
515            _ => {}
516        }
517    } else {
518        let mut a = space
519            .get_page_resource()
520            .common()
521            .get_head_discontiguous_region();
522        while !a.is_zero() {
523            write!(
524                out,
525                "{}->{}",
526                a,
527                a + space.common().vm_map().get_contiguous_region_size(a) - 1
528            )?;
529            a = space.common().vm_map().get_next_contiguous_region(a);
530            if !a.is_zero() {
531                write!(out, " ")?;
532            }
533        }
534    }
535    writeln!(out)?;
536
537    Ok(())
538}
539
// Allow downcasting a `&dyn Space<VM>` to a concrete space type via `downcast_rs`.
impl_downcast!(Space<VM> where VM: VMBinding);
541
/// Fields shared by all spaces.  Every concrete space owns a `CommonSpace` and exposes
/// it through [`Space::common`].
pub struct CommonSpace<VM: VMBinding> {
    /// The name of the space, used in logging and assertions.
    pub name: &'static str,
    /// The space descriptor; compared against VM-map lookups to identify this space.
    pub descriptor: SpaceDescriptor,
    /// The virtual memory request this space was created with.
    pub vmrequest: VMRequest,

    /// For a copying space that allows sft_trace_object(), this should be set before each GC so we know
    /// the copy semantics for the space.
    pub copy: Option<CopySemantics>,

    /// Whether this space is immortal.
    pub immortal: bool,
    /// Whether objects in this space may be moved.
    pub movable: bool,
    /// Whether this space occupies one contiguous address range.
    pub contiguous: bool,
    /// Whether newly acquired memory should be zeroed (see `get_new_pages_and_initialize`).
    pub zeroed: bool,

    /// Whether the space's memory is mapped with execute permission (see `mmap_protection`).
    pub permission_exec: bool,

    /// Start address of the space (only meaningful for contiguous spaces).
    pub start: Address,
    /// Extent in bytes of the space (only meaningful for contiguous spaces).
    pub extent: usize,

    pub vm_map: &'static dyn VMMap,
    pub mmapper: &'static dyn Mmapper,

    /// The global and policy-local side metadata specs for this space.
    pub(crate) metadata: SideMetadataContext,

    /// This field equals to needs_log_bit in the plan constraints.
    // TODO: This should be a constant for performance.
    pub needs_log_bit: bool,
    pub unlog_allocated_object: bool,
    pub unlog_traced_object: bool,

    /// A lock used during acquire() to make sure only one thread can allocate.
    pub acquire_lock: Mutex<()>,

    pub gc_trigger: Arc<GCTrigger<VM>>,
    pub global_state: Arc<GlobalState>,
    pub options: Arc<Options>,

    /// When set, objects allocated in this space are treated as live on allocation
    /// (see `Space::set_allocate_as_live`).
    pub allocate_as_live: AtomicBool,

    // Marker so the struct is parameterized by VM without storing one.
    p: PhantomData<VM>,
}
583
/// Arguments passed from a policy to create a space. This includes policy specific args.
pub struct PolicyCreateSpaceArgs<'a, VM: VMBinding> {
    /// The plan-level creation arguments this wraps.
    pub plan_args: PlanCreateSpaceArgs<'a, VM>,
    /// Whether objects in the space may be moved.
    pub movable: bool,
    /// Whether the space is immortal.
    pub immortal: bool,
    /// Side metadata specs local to the policy creating the space.
    pub local_side_metadata_specs: Vec<SideMetadataSpec>,
}
591
/// Arguments passed from a plan to create a space.
pub struct PlanCreateSpaceArgs<'a, VM: VMBinding> {
    /// The name of the space to create.
    pub name: &'static str,
    /// Whether newly acquired memory should be zeroed.
    pub zeroed: bool,
    /// Whether the space's memory should be mapped with execute permission.
    pub permission_exec: bool,
    pub unlog_allocated_object: bool,
    pub unlog_traced_object: bool,
    /// The virtual memory request for the space.
    pub vmrequest: VMRequest,
    /// Side metadata specs shared across all policies.
    pub global_side_metadata_specs: Vec<SideMetadataSpec>,
    pub vm_map: &'static dyn VMMap,
    pub mmapper: &'static dyn Mmapper,
    /// Heap metadata used to reserve address ranges for contiguous spaces.
    pub heap: &'a mut HeapMeta,
    /// The plan's constraints (e.g. whether the log bit is needed).
    pub constraints: &'a PlanConstraints,
    pub gc_trigger: Arc<GCTrigger<VM>>,
    pub scheduler: Arc<GCWorkScheduler<VM>>,
    pub options: Arc<Options>,
    pub global_state: Arc<GlobalState>,
}
610
611impl<'a, VM: VMBinding> PlanCreateSpaceArgs<'a, VM> {
612    /// Turning PlanCreateSpaceArgs into a PolicyCreateSpaceArgs
613    pub fn into_policy_args(
614        self,
615        movable: bool,
616        immortal: bool,
617        policy_metadata_specs: Vec<SideMetadataSpec>,
618    ) -> PolicyCreateSpaceArgs<'a, VM> {
619        PolicyCreateSpaceArgs {
620            movable,
621            immortal,
622            local_side_metadata_specs: policy_metadata_specs,
623            plan_args: self,
624        }
625    }
626}
627
impl<VM: VMBinding> CommonSpace<VM> {
    /// Create a `CommonSpace` from the given creation arguments.
    ///
    /// For a discontiguous request, only the descriptor is created and the function returns
    /// early.  For a contiguous request, the address range is determined (either fixed, or
    /// reserved from the heap metadata), registered with the VM map if it overlaps the
    /// available heap range, and side metadata for the range is mapped eagerly.
    ///
    /// Panics if the requested extent or start address is not chunk-aligned, or if mapping
    /// the side metadata fails.
    pub fn new(args: PolicyCreateSpaceArgs<VM>) -> Self {
        let mut rtn = CommonSpace {
            name: args.plan_args.name,
            descriptor: SpaceDescriptor::UNINITIALIZED,
            vmrequest: args.plan_args.vmrequest,
            copy: None,
            immortal: args.immortal,
            movable: args.movable,
            contiguous: true,
            permission_exec: args.plan_args.permission_exec,
            zeroed: args.plan_args.zeroed,
            start: unsafe { Address::zero() },
            extent: 0,
            vm_map: args.plan_args.vm_map,
            mmapper: args.plan_args.mmapper,
            needs_log_bit: args.plan_args.constraints.needs_log_bit,
            unlog_allocated_object: args.plan_args.unlog_allocated_object,
            unlog_traced_object: args.plan_args.unlog_traced_object,
            gc_trigger: args.plan_args.gc_trigger,
            metadata: SideMetadataContext {
                global: args.plan_args.global_side_metadata_specs,
                local: args.local_side_metadata_specs,
            },
            acquire_lock: Mutex::new(()),
            global_state: args.plan_args.global_state,
            options: args.plan_args.options.clone(),
            allocate_as_live: AtomicBool::new(false),
            p: PhantomData,
        };

        let vmrequest = args.plan_args.vmrequest;
        if vmrequest.is_discontiguous() {
            rtn.contiguous = false;
            // FIXME
            rtn.descriptor = SpaceDescriptor::create_descriptor();
            // VM.memory.setHeapRange(index, HEAP_START, HEAP_END);
            return rtn;
        }

        // Determine the extent and whether the space is placed at the top of the heap.
        let (extent, top) = match vmrequest {
            VMRequest::Fraction { frac, top: _top } => (get_frac_available(frac), _top),
            VMRequest::Extent {
                extent: _extent,
                top: _top,
            } => (_extent, _top),
            VMRequest::Fixed {
                extent: _extent, ..
            } => (_extent, false),
            _ => unreachable!(),
        };

        assert!(
            extent == raw_align_up(extent, BYTES_IN_CHUNK),
            "{} requested non-aligned extent: {} bytes",
            rtn.name,
            extent
        );

        let start = if let VMRequest::Fixed { start: _start, .. } = vmrequest {
            _start
        } else {
            // FIXME
            //if (HeapLayout.vmMap.isFinalized()) VM.assertions.fail("heap is narrowed after regionMap is finalized: " + name);
            args.plan_args.heap.reserve(extent, top)
        };
        assert!(
            start == chunk_align_up(start),
            "{} starting on non-aligned boundary: {}",
            rtn.name,
            start
        );

        rtn.contiguous = true;
        rtn.start = start;
        rtn.extent = extent;
        // FIXME
        rtn.descriptor = SpaceDescriptor::create_descriptor_from_heap_range(start, start + extent);
        // VM.memory.setHeapRange(index, start, start.plus(extent));

        // We only initialize our vm map if the range of the space is in our available heap range. For normal spaces,
        // they are definitely in our heap range. But for VM space, a runtime could give us an arbitrary range. We only
        // insert into our vm map if the range overlaps with our heap.
        {
            use crate::util::heap::layout;
            let overlap =
                Address::range_intersection(&(start..start + extent), &layout::available_range());
            if !overlap.is_empty() {
                args.plan_args.vm_map.insert(
                    overlap.start,
                    overlap.end - overlap.start,
                    rtn.descriptor,
                );
            }
        }

        // For contiguous space, we know its address range so we reserve metadata memory for its range.
        rtn.metadata
            .try_map_metadata_address_range(rtn.start, rtn.extent, rtn.name)
            .unwrap_or_else(|e| {
                // TODO(Javad): handle meta space allocation failure
                panic!("failed to mmap meta memory: {e}");
            });

        debug!(
            "Created space {} [{}, {}) for {} bytes",
            rtn.name,
            start,
            start + extent,
            extent
        );

        rtn
    }

    /// Eagerly initialize SFT entries for a contiguous space's entire range.
    pub fn initialize_sft(
        &self,
        sft: &(dyn SFT + Sync + 'static),
        sft_map: &mut dyn crate::policy::sft_map::SFTMap,
    ) {
        // We have to keep this for now: if a space is contiguous, our page resource will NOT consider newly allocated chunks
        // as new chunks (new_chunks = true). In that case, in grow_space(), we do not set SFT when new_chunks = false.
        // We can fix this by either of these:
        // * fix page resource, so it properly returns new_chunk
        // * change grow_space() so it sets SFT no matter what the new_chunks value is.
        // FIXME: eagerly initializing SFT is not a good idea.
        if self.contiguous {
            // SAFETY: we have exclusive access to the SFT map (`&mut`), and the range is
            // this space's own contiguous range.
            unsafe { sft_map.eager_initialize(sft, self.start, self.extent) };
        }
    }

    /// The VM map shared by all spaces.
    pub fn vm_map(&self) -> &'static dyn VMMap {
        self.vm_map
    }

    /// The mmap protection for this space: read/write, plus execute if the space requires
    /// it or the `exec_permission_on_all_spaces` feature is enabled.
    pub fn mmap_protection(&self) -> MmapProtection {
        if self.permission_exec || cfg!(feature = "exec_permission_on_all_spaces") {
            MmapProtection::ReadWriteExec
        } else {
            MmapProtection::ReadWrite
        }
    }

    /// Print global (space-independent) debug info about an object: its VO bit (if the
    /// feature is enabled), its log bit (if the plan needs it), and its liveness.
    pub(crate) fn debug_print_object_global_info(&self, object: ObjectReference) {
        #[cfg(feature = "vo_bit")]
        println!(
            "vo bit = {}",
            crate::util::metadata::vo_bit::is_vo_bit_set(object)
        );
        if self.needs_log_bit {
            use crate::vm::object_model::ObjectModel;
            use std::sync::atomic::Ordering;
            println!(
                "log bit = {}",
                VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.is_unlogged::<VM>(object, Ordering::Relaxed),
            );
        }
        println!("is live = {}", object.is_live());
    }
}
788
789fn get_frac_available(frac: f32) -> usize {
790    trace!("AVAILABLE_START={}", vm_layout().available_start());
791    trace!("AVAILABLE_END={}", vm_layout().available_end());
792    let bytes = (frac * vm_layout().available_bytes() as f32) as usize;
793    trace!("bytes={}*{}={}", frac, vm_layout().available_bytes(), bytes);
794    let mb = bytes >> LOG_BYTES_IN_MBYTE;
795    let rtn = mb << LOG_BYTES_IN_MBYTE;
796    trace!("rtn={}", rtn);
797    let aligned_rtn = raw_align_up(rtn, BYTES_IN_CHUNK);
798    trace!("aligned_rtn={}", aligned_rtn);
799    aligned_rtn
800}
801
802pub fn required_chunks(pages: usize) -> usize {
803    let extent = raw_align_up(pages_to_bytes(pages), BYTES_IN_CHUNK);
804    extent >> LOG_BYTES_IN_CHUNK
805}