mmtk/policy/space.rs

use crate::global_state::GlobalState;
use crate::plan::PlanConstraints;
use crate::scheduler::GCWorkScheduler;
use crate::util::conversions::*;
use crate::util::metadata::side_metadata::{
    SideMetadataContext, SideMetadataSanity, SideMetadataSpec,
};
use crate::util::object_enum::ObjectEnumerator;
use crate::util::Address;
use crate::util::ObjectReference;

use crate::util::heap::layout::vm_layout::{vm_layout, LOG_BYTES_IN_CHUNK};
use crate::util::heap::{PageResource, VMRequest};
use crate::util::options::Options;
use crate::vm::{ActivePlan, Collection};

use crate::util::constants::{LOG_BYTES_IN_MBYTE, LOG_BYTES_IN_PAGE};
use crate::util::conversions;
use crate::util::opaque_pointer::*;

use crate::mmtk::SFT_MAP;
#[cfg(debug_assertions)]
use crate::policy::sft::EMPTY_SFT_NAME;
use crate::policy::sft::SFT;
use crate::util::alloc::allocator::AllocationOptions;
use crate::util::copy::*;
use crate::util::heap::gc_trigger::GCTrigger;
use crate::util::heap::layout::vm_layout::BYTES_IN_CHUNK;
use crate::util::heap::layout::Mmapper;
use crate::util::heap::layout::VMMap;
use crate::util::heap::space_descriptor::SpaceDescriptor;
use crate::util::heap::HeapMeta;
use crate::util::memory::{self, HugePageSupport, MmapProtection, MmapStrategy};
use crate::vm::VMBinding;

use std::marker::PhantomData;
use std::sync::atomic::AtomicBool;
use std::sync::Arc;
use std::sync::Mutex;

use downcast_rs::Downcast;

pub trait Space<VM: VMBinding>: 'static + SFT + Sync + Downcast {
    fn as_space(&self) -> &dyn Space<VM>;
    fn as_sft(&self) -> &(dyn SFT + Sync + 'static);
    fn get_page_resource(&self) -> &dyn PageResource<VM>;

    /// Get a mutable reference to the underlying page resource, or `None` if the space does not
    /// have a page resource.
    fn maybe_get_page_resource_mut(&mut self) -> Option<&mut dyn PageResource<VM>>;

    /// Initialize entries in the SFT map for the space. This is called when the Space object
    /// has a non-moving address, as we will use the address to set the SFT.
    /// Currently, after we create a boxed plan, spaces in the plan have a non-moving address.
    fn initialize_sft(&self, sft_map: &mut dyn crate::policy::sft_map::SFTMap);

    /// A check for the obvious out-of-memory case: if the requested size is larger than
    /// the heap size, it is definitely an OOM. We would like to identify that, and
    /// allow the binding to deal with the OOM. Without this check, we will attempt
    /// to allocate from the page resource. If the requested size is unrealistically large
    /// (such as `usize::MAX`), it breaks the assumptions of our implementation of
    /// the page resource, the VM map, etc. This check prevents that, and allows us to
    /// handle the OOM case.
    ///
    /// Each allocator that may request an arbitrary size should call this method before
    /// acquiring memory from the space. For example, the bump pointer allocator and the large
    /// object allocator need to call this method. On the other hand, allocators that only
    /// allocate memory in fixed-size blocks do not need to call this method.
    ///
    /// An allocator should call this method before doing any computation on the size to
    /// avoid arithmetic overflow. If we have to do computation in the allocation fastpath and
    /// overflow happens there, there is nothing we can do about it.
    ///
    /// Returns a boolean indicating whether we will be out of memory, determined by the check.
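    ///
    /// A minimal sketch of the intended call pattern, assuming a hypothetical allocator with a
    /// `space` reference, a raw byte `size`, and a hypothetical `pages_needed_for` helper (this
    /// is not actual allocator code):
    ///
    /// ```ignore
    /// // Check the raw size before doing any arithmetic on it, so overflow cannot happen first.
    /// if space.handle_obvious_oom_request(tls, size, alloc_options) {
    ///     return Address::ZERO; // obvious OOM; the binding has been notified if allowed
    /// }
    /// // Only now is it safe to round `size` up to pages and acquire memory.
    /// let start = space.acquire(tls, pages_needed_for(size), alloc_options);
    /// ```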
    fn will_oom_on_acquire(&self, size: usize) -> bool {
        let max_pages = self.get_gc_trigger().policy.get_max_heap_size_in_pages();
        let requested_pages = size >> LOG_BYTES_IN_PAGE;
        requested_pages > max_pages
    }

    /// Check if the requested `size` is an obvious out-of-memory case using
    /// [`Self::will_oom_on_acquire`] and, if it is, call `Collection::out_of_memory`.  Return the
    /// result of `will_oom_on_acquire`.
    fn handle_obvious_oom_request(
        &self,
        tls: VMThread,
        size: usize,
        alloc_options: AllocationOptions,
    ) -> bool {
        if self.will_oom_on_acquire(size) {
            if alloc_options.allow_oom_call {
                VM::VMCollection::out_of_memory(
                    tls,
                    crate::util::alloc::AllocationError::HeapOutOfMemory,
                );
            }
            return true;
        }
        false
    }

    fn acquire(&self, tls: VMThread, pages: usize, alloc_options: AllocationOptions) -> Address {
        trace!(
            "Space.acquire, tls={:?}, alloc_options={:?}",
            tls,
            alloc_options
        );

        debug_assert!(
            !self.will_oom_on_acquire(pages << LOG_BYTES_IN_PAGE),
            "The requested pages exceed the max heap size. Was will_oom_on_acquire called before acquiring memory?"
        );

        trace!("Reserving pages");
        let pr = self.get_page_resource();
        let pages_reserved = pr.reserve_pages(pages);
        trace!("Pages reserved");

        // Should we poll before acquiring pages from the page resource so that it can trigger a GC?
        // - If tls is a collector, we cannot attempt a GC.
        let should_poll = VM::VMActivePlan::is_mutator(tls);

        // If we should poll, do it now.  Record whether it has triggered a GC.
        // If we should not poll, GC is not triggered.
        let gc_triggered = should_poll && {
            trace!("Polling ..");
            self.get_gc_trigger().poll(false, Some(self.as_space()))
        };

        // We can try to get pages if
        // - GC is not triggered, or
        // - GC is triggered, but we allow over-committing.
        let should_get_pages = !gc_triggered || alloc_options.allow_overcommit;

        // Get new pages if we should. If we didn't get new pages from the page resource for any
        // reason (if we decided not to, or if we tried and failed), this function shall return a
        // null address.
        if should_get_pages {
            if let Some(addr) = self.get_new_pages_and_initialize(tls, pages, pr, pages_reserved) {
                addr
            } else {
                self.not_acquiring(tls, alloc_options, pr, pages_reserved, true);
                Address::ZERO
            }
        } else {
            self.not_acquiring(tls, alloc_options, pr, pages_reserved, false);
            Address::ZERO
        }
    }

    /// Get new pages from the page resource, and do the necessary initialization, including
    /// mmapping and zeroing the memory.
    ///
    /// The caller must have reserved pages from the page resource.  If pages are successfully
    /// acquired from the page resource, the reserved pages will be committed.
    ///
    /// Returns `None` if it fails to acquire memory from the page resource.  The caller should
    /// then call `pr.clear_request`.
    fn get_new_pages_and_initialize(
        &self,
        tls: VMThread,
        pages: usize,
        pr: &dyn PageResource<VM>,
        pages_reserved: usize,
    ) -> Option<Address> {
        // We need this lock: Otherwise, it is possible that one thread acquires pages in a new chunk, but has not yet
        // set the SFT for it (in grow_space()), while another thread acquires pages in the same chunk, which is not
        // a new chunk so grow_space() won't be called on it. The second thread could return a result in the chunk before
        // its SFT is properly set.
        // We need to minimize the scope of this lock for performance when we have many threads (mutator threads, or GC threads with copying allocators).
        // See: https://github.com/mmtk/mmtk-core/issues/610
        let lock = self.common().acquire_lock.lock().unwrap();

        let Ok(res) = pr.get_new_pages(self.common().descriptor, pages_reserved, pages, tls) else {
            return None;
        };

        debug!(
            "Got new pages {} ({} pages) for {} in chunk {}, new_chunk? {}",
            res.start,
            res.pages,
            self.get_name(),
            conversions::chunk_align_down(res.start),
            res.new_chunk
        );
        let bytes = conversions::pages_to_bytes(res.pages);

        let mmap = || {
            // Mmap the pages and the side metadata, and handle errors. In case of any error,
            // we will either call back to the VM for OOM, or simply panic.
            if let Err(mmap_error) = self
                .common()
                .mmapper
                .ensure_mapped(
                    res.start,
                    res.pages,
                    self.common().mmap_strategy(),
                    &memory::MmapAnnotation::Space {
                        name: self.get_name(),
                    },
                )
                .and(self.common().metadata.try_map_metadata_space(
                    res.start,
                    bytes,
                    self.get_name(),
                ))
            {
                memory::handle_mmap_error::<VM>(mmap_error, tls, res.start, bytes);
            }
        };
        let grow_space = || {
            self.grow_space(res.start, bytes, res.new_chunk);
        };

        // The scope of the lock is important for performance when we have many allocator threads.
        if SFT_MAP.get_side_metadata().is_some() {
            // If the SFT map uses side metadata, we have to initialize the side metadata first.
            mmap();
            // Then grow the space, which will use the side metadata we mapped above.
            grow_space();
            // Then we can drop the lock after grow_space().
            drop(lock);
        } else {
            // In normal cases, we can drop the lock immediately after grow_space(),
            grow_space();
            drop(lock);
            // and map the side metadata without holding the lock.
            mmap();
        }

        // TODO: Concurrent zeroing
        if self.common().zeroed {
            memory::zero(res.start, bytes);
        }

        // Some assertions
        {
            // --- Assert the start of the allocated region ---
            // The SFT at the start address should be correct.
            debug_assert_eq!(SFT_MAP.get_checked(res.start).name(), self.get_name());
            // The start address is in our space.
            debug_assert!(self.address_in_space(res.start));
            // The descriptor should be correct.
            debug_assert_eq!(
                self.common().vm_map().get_descriptor_for_address(res.start),
                self.common().descriptor
            );

            // --- Assert the last byte in the allocated region ---
            let last_byte = res.start + bytes - 1;
            // The SFT for the last byte in the allocated memory should be correct.
            debug_assert_eq!(SFT_MAP.get_checked(last_byte).name(), self.get_name());
            // The last byte in the allocated memory should be in this space.
            debug_assert!(self.address_in_space(last_byte));
            // The descriptor for the last byte should be correct.
            debug_assert_eq!(
                self.common().vm_map().get_descriptor_for_address(last_byte),
                self.common().descriptor
            );
        }

        debug!("Space.acquire(), returned = {}", res.start);
        Some(res.start)
    }

    /// Handle the case where [`Space::acquire`] will not or cannot acquire pages from the page
    /// resource.  This may happen when
    /// -   GC is triggered and the allocation does not allow over-committing, or
    /// -   the allocation tried to acquire pages from the page resource but ran out of physical
    ///     memory.
    fn not_acquiring(
        &self,
        tls: VMThread,
        alloc_options: AllocationOptions,
        pr: &dyn PageResource<VM>,
        pages_reserved: usize,
        attempted_allocation_and_failed: bool,
    ) {
        assert!(
            VM::VMActivePlan::is_mutator(tls),
            "A non-mutator thread failed to get pages from the page resource.  \
            Copying GC plans should compute the copying headroom carefully to prevent this."
        );

        // Clear the request
        pr.clear_request(pages_reserved);

        // If we are not at a safepoint, return immediately.
        if !alloc_options.at_safepoint {
            return;
        }

        debug!("Collection required");

        if !self.common().global_state.is_initialized() {
            // We cannot do a GC here because collection is not initialized yet.
            panic!(
                "GC is not allowed here: collection is not initialized \
                    (did you call initialize_collection()?).  \
                    Out of physical memory: {phy}",
                phy = attempted_allocation_and_failed
            );
        }

        if attempted_allocation_and_failed {
            // We thought we had memory to allocate, but somehow failed the allocation. Force a GC.
            let gc_performed = self.get_gc_trigger().poll(true, Some(self.as_space()));
            debug_assert!(gc_performed, "GC not performed when forced.");
        }

        // Inform the GC trigger about the pending allocation.
        let meta_pages_reserved = self.estimate_side_meta_pages(pages_reserved);
        let total_pages_reserved = pages_reserved + meta_pages_reserved;
        self.get_gc_trigger()
            .policy
            .on_pending_allocation(total_pages_reserved);

        VM::VMCollection::block_for_gc(VMMutatorThread(tls)); // We have checked that this is a mutator

    }

    fn address_in_space(&self, start: Address) -> bool {
        if !self.common().descriptor.is_contiguous() {
            self.common().vm_map().get_descriptor_for_address(start) == self.common().descriptor
        } else {
            start >= self.common().start && start < self.common().start + self.common().extent
        }
    }

    fn in_space(&self, object: ObjectReference) -> bool {
        self.address_in_space(object.to_raw_address())
    }

    /// This is called after we get a result from the page resource.  The space may
    /// tap into the hook to monitor heap growth.  The call is made from within the
    /// page resource's critical region, immediately before yielding the lock.
    ///
    /// Arguments:
    /// * `start`: The start of the newly allocated space.
    /// * `bytes`: The size of the newly allocated space.
    /// * `new_chunk`: `true` if the new space encroached upon or started a new chunk or chunks.
    fn grow_space(&self, start: Address, bytes: usize, new_chunk: bool) {
        trace!(
            "Grow space from {} for {} bytes (new chunk = {})",
            start,
            bytes,
            new_chunk
        );

        // If this is not a new chunk, the SFT for [start, start + bytes) should already be initialized.
        #[cfg(debug_assertions)]
        if !new_chunk {
            debug_assert!(
                SFT_MAP.get_checked(start).name() != EMPTY_SFT_NAME,
                "In grow_space(start = {}, bytes = {}, new_chunk = {}), we have empty SFT entries (chunk for {} = {})",
                start,
                bytes,
                new_chunk,
                start,
                SFT_MAP.get_checked(start).name()
            );
            debug_assert!(
                SFT_MAP.get_checked(start + bytes - 1).name() != EMPTY_SFT_NAME,
                "In grow_space(start = {}, bytes = {}, new_chunk = {}), we have empty SFT entries (chunk for {} = {})",
                start,
                bytes,
                new_chunk,
                start + bytes - 1,
                SFT_MAP.get_checked(start + bytes - 1).name()
            );
        }

        if new_chunk {
            unsafe { SFT_MAP.update(self.as_sft(), start, bytes) };
        }
    }

    /// Estimate the amount of side metadata memory needed for a given data memory size in pages.
    /// The result will over-estimate the number of metadata pages needed, with at least one page
    /// per side metadata spec.  This is a reasonably accurate estimate of the number of side
    /// metadata pages the space actually consumes.
    ///
    /// This function is used for both triggering GC (via [`Space::reserved_pages`]) and resizing
    /// the heap (via [`crate::util::heap::GCTriggerPolicy::on_pending_allocation`]).
    fn estimate_side_meta_pages(&self, data_pages: usize) -> usize {
        self.common().metadata.calculate_reserved_pages(data_pages)
    }

    fn reserved_pages(&self) -> usize {
        let data_pages = self.get_page_resource().reserved_pages();
        let meta_pages = self.estimate_side_meta_pages(data_pages);
        data_pages + meta_pages
    }

    /// Return the number of physical pages available.
    fn available_physical_pages(&self) -> usize {
        self.get_page_resource().get_available_physical_pages()
    }

    fn get_name(&self) -> &'static str {
        self.common().name
    }

    fn get_descriptor(&self) -> SpaceDescriptor {
        self.common().descriptor
    }

    fn common(&self) -> &CommonSpace<VM>;
    fn get_gc_trigger(&self) -> &GCTrigger<VM> {
        self.common().gc_trigger.as_ref()
    }

    fn release_multiple_pages(&mut self, start: Address);

    /// What copy semantics we should use for this space if we copy objects from this space.
    /// This is only needed for plans that use `SFTProcessEdges`.
    fn set_copy_for_sft_trace(&mut self, _semantics: Option<CopySemantics>) {
        panic!("A copying space should override this method")
    }

    /// Ensure that the current space's metadata context does not have any issues.
    /// Panics with a suitable message if any issue is detected.
    /// It also initialises the sanity maps, which will then be used if the `extreme_assertions` feature is active.
    /// Internally this calls verify_metadata_context() from `util::metadata::sanity`.
    ///
    /// This function is called once per space by its parent plan, but may be called multiple times per policy.
    ///
    /// Arguments:
    /// * `side_metadata_sanity_checker`: The `SideMetadataSanity` object instantiated in the calling plan.
    fn verify_side_metadata_sanity(&self, side_metadata_sanity_checker: &mut SideMetadataSanity) {
        side_metadata_sanity_checker
            .verify_metadata_context(std::any::type_name::<Self>(), &self.common().metadata)
    }

    /// Enumerate objects in the current space.
    ///
    /// Implementers can use the `enumerator` to report
    ///
    /// -   individual objects within the space using `enumerator.visit_object`, and
    /// -   ranges of addresses that may contain objects using `enumerator.visit_address_range`.
    ///     The caller will then enumerate objects in the range using the VO bits metadata.
    ///
    /// Each object in the space shall be covered by one of the two methods above.
    ///
    /// # Implementation considerations
    ///
    /// **Skipping empty ranges**: When enumerating address ranges, spaces can skip ranges (blocks,
    /// chunks, etc.) that are guaranteed not to contain objects.
    ///
    /// **Dynamic dispatch**: Because `Space` is a trait object type and `enumerator` is a `dyn`
    /// reference, invoking methods of `enumerator` involves dynamic dispatch.  But the overhead
    /// is acceptable if we call it one block at a time, because scanning the VO bits will
    /// dominate the execution time.  For a large object space (LOS), it is cheaper to enumerate
    /// individual objects than to scan the VO bits because objects are sparse.
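    ///
    /// A minimal sketch of a block-based implementation, assuming a hypothetical `self.blocks()`
    /// iterator with `start()`/`end()` bounds (this is not an actual policy implementation):
    ///
    /// ```ignore
    /// fn enumerate_objects(&self, enumerator: &mut dyn ObjectEnumerator) {
    ///     for block in self.blocks() {
    ///         // Report the whole block; the caller walks the VO bits to find objects inside it.
    ///         enumerator.visit_address_range(block.start(), block.end());
    ///     }
    /// }
    /// ```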
    fn enumerate_objects(&self, enumerator: &mut dyn ObjectEnumerator);

    fn set_allocate_as_live(&self, live: bool) {
        self.common()
            .allocate_as_live
            .store(live, std::sync::atomic::Ordering::SeqCst);
    }

    fn should_allocate_as_live(&self) -> bool {
        self.common()
            .allocate_as_live
            .load(std::sync::atomic::Ordering::Acquire)
    }

    /// Clear the side log bits for allocated regions in this space.
    /// This method is only called if the plan knows the log bits are side metadata.
    fn clear_side_log_bits(&self);

    /// Set the side log bits for allocated regions in this space.
    /// This method is only called if the plan knows the log bits are side metadata.
    fn set_side_log_bits(&self);
}

/// Print the VM map for a space.
///
/// `Space` needs to be object-safe, so it cannot have methods that use extra generic type
/// parameters; this method is therefore placed outside the `Space` trait.
/// It can be invoked on a `&dyn Space` (`space.as_space()` returns a `&dyn Space`).
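///
/// A minimal sketch of invoking it (the `space` reference here is a placeholder):
///
/// ```ignore
/// let mut out = String::new();
/// print_vm_map(space.as_space(), &mut out).unwrap();
/// println!("{}", out);
/// ```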
#[allow(unused)]
pub(crate) fn print_vm_map<VM: VMBinding>(
    space: &dyn Space<VM>,
    out: &mut impl std::fmt::Write,
) -> Result<(), std::fmt::Error> {
    let common = space.common();
    write!(out, "{} ", common.name)?;
    if common.immortal {
        write!(out, "I")?;
    } else {
        write!(out, " ")?;
    }
    if common.movable {
        write!(out, " ")?;
    } else {
        write!(out, "N")?;
    }
    write!(out, " ")?;
    if common.contiguous {
        write!(
            out,
            "{}->{}",
            common.start,
            common.start + common.extent - 1
        )?;
        match common.vmrequest {
            VMRequest::Extent { extent, .. } => {
                write!(out, " E {}", extent)?;
            }
            VMRequest::Fraction { frac, .. } => {
                write!(out, " F {}", frac)?;
            }
            _ => {}
        }
    } else {
        let mut a = space
            .get_page_resource()
            .common()
            .get_head_discontiguous_region();
        while !a.is_zero() {
            write!(
                out,
                "{}->{}",
                a,
                a + space.common().vm_map().get_contiguous_region_size(a) - 1
            )?;
            a = space.common().vm_map().get_next_contiguous_region(a);
            if !a.is_zero() {
                write!(out, " ")?;
            }
        }
    }
    writeln!(out)?;

    Ok(())
}

impl_downcast!(Space<VM> where VM: VMBinding);

pub struct CommonSpace<VM: VMBinding> {
    pub name: &'static str,
    pub descriptor: SpaceDescriptor,
    pub vmrequest: VMRequest,

    /// For a copying space that allows sft_trace_object(), this should be set before each GC so we know
    /// the copy semantics for the space.
    pub copy: Option<CopySemantics>,

    pub immortal: bool,
    pub movable: bool,
    pub contiguous: bool,
    pub zeroed: bool,

    pub permission_exec: bool,

    pub start: Address,
    pub extent: usize,

    pub vm_map: &'static dyn VMMap,
    pub mmapper: &'static dyn Mmapper,

    pub(crate) metadata: SideMetadataContext,

    /// This field equals `needs_log_bit` in the plan constraints.
    // TODO: This should be a constant for performance.
    pub needs_log_bit: bool,
    pub unlog_allocated_object: bool,
    pub unlog_traced_object: bool,

    /// A lock used during acquire() to make sure only one thread can allocate.
    pub acquire_lock: Mutex<()>,

    pub gc_trigger: Arc<GCTrigger<VM>>,
    pub global_state: Arc<GlobalState>,
    pub options: Arc<Options>,

    pub allocate_as_live: AtomicBool,

    p: PhantomData<VM>,
}

/// Arguments passed from a policy to create a space.  This includes policy-specific args.
pub struct PolicyCreateSpaceArgs<'a, VM: VMBinding> {
    pub plan_args: PlanCreateSpaceArgs<'a, VM>,
    pub movable: bool,
    pub immortal: bool,
    pub local_side_metadata_specs: Vec<SideMetadataSpec>,
}

/// Arguments passed from a plan to create a space.
pub struct PlanCreateSpaceArgs<'a, VM: VMBinding> {
    pub name: &'static str,
    pub zeroed: bool,
    pub permission_exec: bool,
    pub unlog_allocated_object: bool,
    pub unlog_traced_object: bool,
    pub vmrequest: VMRequest,
    pub global_side_metadata_specs: Vec<SideMetadataSpec>,
    pub vm_map: &'static dyn VMMap,
    pub mmapper: &'static dyn Mmapper,
    pub heap: &'a mut HeapMeta,
    pub constraints: &'a PlanConstraints,
    pub gc_trigger: Arc<GCTrigger<VM>>,
    pub scheduler: Arc<GCWorkScheduler<VM>>,
    pub options: Arc<Options>,
    pub global_state: Arc<GlobalState>,
}

impl<'a, VM: VMBinding> PlanCreateSpaceArgs<'a, VM> {
    /// Turn a `PlanCreateSpaceArgs` into a `PolicyCreateSpaceArgs`.
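    ///
    /// A minimal sketch of how a policy constructor might use this (the argument values and the
    /// empty metadata spec list below are placeholders, not taken from a real policy):
    ///
    /// ```ignore
    /// pub fn new(args: PlanCreateSpaceArgs<VM>) -> Self {
    ///     // movable = true, immortal = false, plus this policy's local side metadata specs.
    ///     let common = CommonSpace::new(args.into_policy_args(true, false, vec![]));
    ///     // ... build the rest of the policy space around `common` ...
    /// }
    /// ```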
    pub fn into_policy_args(
        self,
        movable: bool,
        immortal: bool,
        policy_metadata_specs: Vec<SideMetadataSpec>,
    ) -> PolicyCreateSpaceArgs<'a, VM> {
        PolicyCreateSpaceArgs {
            movable,
            immortal,
            local_side_metadata_specs: policy_metadata_specs,
            plan_args: self,
        }
    }
}

impl<VM: VMBinding> CommonSpace<VM> {
    pub fn new(args: PolicyCreateSpaceArgs<VM>) -> Self {
        let mut rtn = CommonSpace {
            name: args.plan_args.name,
            descriptor: SpaceDescriptor::UNINITIALIZED,
            vmrequest: args.plan_args.vmrequest,
            copy: None,
            immortal: args.immortal,
            movable: args.movable,
            contiguous: true,
            permission_exec: args.plan_args.permission_exec,
            zeroed: args.plan_args.zeroed,
            start: unsafe { Address::zero() },
            extent: 0,
            vm_map: args.plan_args.vm_map,
            mmapper: args.plan_args.mmapper,
            needs_log_bit: args.plan_args.constraints.needs_log_bit,
            unlog_allocated_object: args.plan_args.unlog_allocated_object,
            unlog_traced_object: args.plan_args.unlog_traced_object,
            gc_trigger: args.plan_args.gc_trigger,
            metadata: SideMetadataContext {
                global: args.plan_args.global_side_metadata_specs,
                local: args.local_side_metadata_specs,
            },
            acquire_lock: Mutex::new(()),
            global_state: args.plan_args.global_state,
            options: args.plan_args.options.clone(),
            allocate_as_live: AtomicBool::new(false),
            p: PhantomData,
        };

        let vmrequest = args.plan_args.vmrequest;
        if vmrequest.is_discontiguous() {
            rtn.contiguous = false;
            // FIXME
            rtn.descriptor = SpaceDescriptor::create_descriptor();
            // VM.memory.setHeapRange(index, HEAP_START, HEAP_END);
            return rtn;
        }

        let (extent, top) = match vmrequest {
            VMRequest::Fraction { frac, top: _top } => (get_frac_available(frac), _top),
            VMRequest::Extent {
                extent: _extent,
                top: _top,
            } => (_extent, _top),
            VMRequest::Fixed {
                extent: _extent, ..
            } => (_extent, false),
            _ => unreachable!(),
        };

        assert!(
            extent == raw_align_up(extent, BYTES_IN_CHUNK),
            "{} requested non-aligned extent: {} bytes",
            rtn.name,
            extent
        );

        let start = if let VMRequest::Fixed { start: _start, .. } = vmrequest {
            _start
        } else {
            // FIXME
            //if (HeapLayout.vmMap.isFinalized()) VM.assertions.fail("heap is narrowed after regionMap is finalized: " + name);
            args.plan_args.heap.reserve(extent, top)
        };
        assert!(
            start == chunk_align_up(start),
            "{} starting on non-aligned boundary: {}",
            rtn.name,
            start
        );

        rtn.contiguous = true;
        rtn.start = start;
        rtn.extent = extent;
        // FIXME
        rtn.descriptor = SpaceDescriptor::create_descriptor_from_heap_range(start, start + extent);
        // VM.memory.setHeapRange(index, start, start.plus(extent));

        // We only initialize our VM map if the range of the space is in our available heap range. Normal spaces
        // are definitely in our heap range, but for the VM space, a runtime could give us an arbitrary range. We only
        // insert into our VM map if the range overlaps with our heap.
        {
            use crate::util::heap::layout;
            let overlap =
                Address::range_intersection(&(start..start + extent), &layout::available_range());
            if !overlap.is_empty() {
                args.plan_args.vm_map.insert(
                    overlap.start,
                    overlap.end - overlap.start,
                    rtn.descriptor,
                );
            }
        }

        // For a contiguous space, we know its address range, so we reserve metadata memory for that range.
        rtn.metadata
            .try_map_metadata_address_range(rtn.start, rtn.extent, rtn.name)
            .unwrap_or_else(|e| {
                // TODO(Javad): handle meta space allocation failure
                panic!("failed to mmap meta memory: {e}");
            });

        debug!(
            "Created space {} [{}, {}) for {} bytes",
            rtn.name,
            start,
            start + extent,
            extent
        );

        rtn
    }

    pub fn initialize_sft(
        &self,
        sft: &(dyn SFT + Sync + 'static),
        sft_map: &mut dyn crate::policy::sft_map::SFTMap,
    ) {
        // We have to keep this for now: if a space is contiguous, our page resource will NOT consider newly allocated chunks
        // as new chunks (new_chunk = true). In that case, in grow_space(), we do not set the SFT when new_chunk = false.
        // We can fix this by either of these:
        // * fix the page resource, so it properly returns new_chunk
        // * change grow_space() so it sets the SFT no matter what the new_chunk value is.
        // FIXME: eagerly initializing the SFT is not a good idea.
        if self.contiguous {
            unsafe { sft_map.eager_initialize(sft, self.start, self.extent) };
        }
    }

    pub fn vm_map(&self) -> &'static dyn VMMap {
        self.vm_map
    }

    pub fn mmap_strategy(&self) -> MmapStrategy {
        MmapStrategy {
            huge_page: if *self.options.transparent_hugepages {
                HugePageSupport::TransparentHugePages
            } else {
                HugePageSupport::No
            },
            prot: if self.permission_exec || cfg!(feature = "exec_permission_on_all_spaces") {
                MmapProtection::ReadWriteExec
            } else {
                MmapProtection::ReadWrite
            },
        }
    }

    pub(crate) fn debug_print_object_global_info(&self, object: ObjectReference) {
        #[cfg(feature = "vo_bit")]
        println!(
            "vo bit = {}",
            crate::util::metadata::vo_bit::is_vo_bit_set(object)
        );
        if self.needs_log_bit {
            use crate::vm::object_model::ObjectModel;
            use std::sync::atomic::Ordering;
            println!(
                "log bit = {}",
                VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.is_unlogged::<VM>(object, Ordering::Relaxed),
            );
        }
        println!("is live = {}", object.is_live());
    }
}

fn get_frac_available(frac: f32) -> usize {
    trace!("AVAILABLE_START={}", vm_layout().available_start());
    trace!("AVAILABLE_END={}", vm_layout().available_end());
    let bytes = (frac * vm_layout().available_bytes() as f32) as usize;
    trace!("bytes={}*{}={}", frac, vm_layout().available_bytes(), bytes);
    let mb = bytes >> LOG_BYTES_IN_MBYTE;
    let rtn = mb << LOG_BYTES_IN_MBYTE;
    trace!("rtn={}", rtn);
    let aligned_rtn = raw_align_up(rtn, BYTES_IN_CHUNK);
    trace!("aligned_rtn={}", aligned_rtn);
    aligned_rtn
}

pub fn required_chunks(pages: usize) -> usize {
    let extent = raw_align_up(pages_to_bytes(pages), BYTES_IN_CHUNK);
    extent >> LOG_BYTES_IN_CHUNK
}