mmtk/policy/space.rs

use crate::global_state::GlobalState;
use crate::plan::PlanConstraints;
use crate::scheduler::GCWorkScheduler;
use crate::util::conversions::*;
use crate::util::metadata::side_metadata::{
    SideMetadataContext, SideMetadataSanity, SideMetadataSpec,
};
use crate::util::object_enum::ObjectEnumerator;
use crate::util::Address;
use crate::util::ObjectReference;

use crate::util::heap::layout::vm_layout::{vm_layout, LOG_BYTES_IN_CHUNK};
use crate::util::heap::{PageResource, VMRequest};
use crate::util::options::Options;
use crate::vm::{ActivePlan, Collection};

use crate::util::constants::LOG_BYTES_IN_MBYTE;
use crate::util::conversions;
use crate::util::opaque_pointer::*;

use crate::mmtk::SFT_MAP;
#[cfg(debug_assertions)]
use crate::policy::sft::EMPTY_SFT_NAME;
use crate::policy::sft::SFT;
use crate::util::alloc::allocator::AllocationOptions;
use crate::util::copy::*;
use crate::util::heap::gc_trigger::GCTrigger;
use crate::util::heap::layout::vm_layout::BYTES_IN_CHUNK;
use crate::util::heap::layout::Mmapper;
use crate::util::heap::layout::VMMap;
use crate::util::heap::space_descriptor::SpaceDescriptor;
use crate::util::heap::HeapMeta;
use crate::util::os::*;
use crate::vm::VMBinding;

use std::marker::PhantomData;
use std::sync::atomic::AtomicBool;
use std::sync::Arc;
use std::sync::Mutex;

use downcast_rs::Downcast;
pub trait Space<VM: VMBinding>: 'static + SFT + Sync + Downcast {
    fn as_space(&self) -> &dyn Space<VM>;
    fn as_sft(&self) -> &(dyn SFT + Sync + 'static);
    fn get_page_resource(&self) -> &dyn PageResource<VM>;

    /// Get a mutable reference to the underlying page resource, or `None` if the space does not
    /// have a page resource.
    fn maybe_get_page_resource_mut(&mut self) -> Option<&mut dyn PageResource<VM>>;

    /// Initialize entries in the SFT map for the space. This is called when the Space object
    /// has a non-moving address, as we will use the address to set the SFT.
    /// Currently, after we create a boxed plan, spaces in the plan have a non-moving address.
    fn initialize_sft(&self, sft_map: &mut dyn crate::policy::sft_map::SFTMap);

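    /// Acquire a number of pages from this space, for use by an allocator.
    ///
    /// This is the allocation slow path. It reserves `pages` pages from the page resource, polls
    /// the GC trigger (possibly triggering a GC) if `tls` is a mutator thread, and then maps and
    /// initializes the newly acquired pages. It returns `Address::ZERO` if no pages could be
    /// acquired, e.g. because a GC has been triggered or the page resource ran out of memory.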
    fn acquire(&self, tls: VMThread, pages: usize, alloc_options: AllocationOptions) -> Address {
        trace!(
            "Space.acquire, tls={:?}, alloc_options={:?}",
            tls,
            alloc_options
        );

        debug_assert!(
            !self.get_gc_trigger().will_oom_on_alloc(pages << crate::util::constants::LOG_BYTES_IN_PAGE),
            "The requested pages exceed the max heap size. Is will_go_oom_on_acquire used before acquiring memory?"
        );

        trace!("Reserving pages");
        let pr = self.get_page_resource();
        let pages_reserved = pr.reserve_pages(pages);
        trace!("Pages reserved");

        // Should we poll before acquiring pages from the page resource, so that it can trigger a GC?
        // - If tls is a collector, we cannot attempt a GC.
        let should_poll = VM::VMActivePlan::is_mutator(tls);

        // If we should poll, do it now.  Record if it has triggered a GC.
        // If we should not poll, GC is not triggered.
        let gc_triggered = should_poll && {
            trace!("Polling ..");
            self.get_gc_trigger().poll(false, Some(self.as_space()))
        };

        // We can try to get pages if
        // - GC is not triggered, or
        // - GC is triggered, but we allow over-committing.
        let should_get_pages = !gc_triggered || alloc_options.allow_overcommit;

        // Get new pages if we should. If we didn't get new pages from the page resource for any
        // reason (if we decided not to, or if we tried and failed), this function shall return a
        // null address.
        if should_get_pages {
            if let Some(addr) = self.get_new_pages_and_initialize(tls, pages, pr, pages_reserved) {
                addr
            } else {
                self.not_acquiring(tls, alloc_options, pr, pages_reserved, true);
                Address::ZERO
            }
        } else {
            self.not_acquiring(tls, alloc_options, pr, pages_reserved, false);
            Address::ZERO
        }
    }

    /// Get new pages from the page resource, and do the necessary initialization, including
    /// mmapping and zeroing the memory.
    ///
    /// The caller must have reserved pages from the page resource.  If pages are successfully
    /// acquired from the page resource, the reserved pages will be committed.
    ///
    /// Returns `None` if it fails to acquire memory from the page resource.  The caller should
    /// then call `pr.clear_request`.
    fn get_new_pages_and_initialize(
        &self,
        tls: VMThread,
        pages: usize,
        pr: &dyn PageResource<VM>,
        pages_reserved: usize,
    ) -> Option<Address> {
        // We need this lock: Otherwise, it is possible that one thread acquires pages in a new chunk, but has not yet
        // set the SFT for it (in grow_space()), while another thread acquires pages in the same chunk, which is not
        // a new chunk so grow_space() won't be called for it. The second thread could return a result in the chunk before
        // its SFT is properly set.
        // We need to minimize the scope of this lock for performance when we have many threads (mutator threads, or GC threads with copying allocators).
        // See: https://github.com/mmtk/mmtk-core/issues/610
        let lock = self.common().acquire_lock.lock().unwrap();

        let Ok(res) = pr.get_new_pages(self.common().descriptor, pages_reserved, pages, tls) else {
            return None;
        };

        debug!(
            "Got new pages {} ({} pages) for {} in chunk {}, new_chunk? {}",
            res.start,
            res.pages,
            self.get_name(),
            conversions::chunk_align_down(res.start),
            res.new_chunk
        );
        let bytes = conversions::pages_to_bytes(res.pages);
        #[cfg(debug_assertions)]
        self.common()
            .metadata
            .assert_metadata_ranges_in_reserved_range(res.start, bytes, self.get_name());

        let mmap = || {
            // Mmap the pages and the side metadata, and handle errors. In case of any error,
            // we will either call back to the VM for OOM, or simply panic.
            if let Err(mmap_error) = self
                .common()
                .mmapper
                .ensure_mapped(
                    res.start,
                    res.pages,
                    self.common()
                        .options
                        .transparent_hugepages_as_huge_page_support(),
                    self.common().mmap_protection(),
                    &MmapAnnotation::Space {
                        name: self.get_name(),
                    },
                )
                .and(self.common().metadata.try_map_metadata_space(
                    res.start,
                    bytes,
                    self.get_name(),
                ))
            {
                OS::handle_mmap_error::<VM>(mmap_error, tls);
            }
        };
        let grow_space = || {
            self.grow_space(res.start, bytes, res.new_chunk);
        };

        // The scope of the lock is important in terms of performance when we have many allocator threads.
        if SFT_MAP.get_side_metadata().is_some() {
            // If the SFT map uses side metadata, we have to map the side metadata first,
            mmap();
            // then grow the space, which will use the side metadata we mapped above,
            grow_space();
            // and only drop the lock after grow_space().
            drop(lock);
        } else {
            // In normal cases, we can drop the lock immediately after grow_space(),
            grow_space();
            drop(lock);
            // and map the side metadata without holding the lock.
            mmap();
        }

        // TODO: Concurrent zeroing
        if self.common().zeroed {
            crate::util::memory::zero(res.start, bytes);
        }

        // Some assertions
        {
            // --- Assert the start of the allocated region ---
            // The SFT for the start address should be correct.
            debug_assert_eq!(SFT_MAP.get_checked(res.start).name(), self.get_name());
            // The start address is in our space.
            debug_assert!(self.address_in_space(res.start));
            // The descriptor should be correct.
            debug_assert_eq!(
                self.common().vm_map().get_descriptor_for_address(res.start),
                self.common().descriptor
            );

            // --- Assert the last byte in the allocated region ---
            let last_byte = res.start + bytes - 1;
            // The SFT for the last byte in the allocated memory should be correct.
            debug_assert_eq!(SFT_MAP.get_checked(last_byte).name(), self.get_name());
            // The last byte in the allocated memory should be in this space.
            debug_assert!(self.address_in_space(last_byte));
            // The descriptor for the last byte should be correct.
            debug_assert_eq!(
                self.common().vm_map().get_descriptor_for_address(last_byte),
                self.common().descriptor
            );
        }

        debug!("Space.acquire(), returned = {}", res.start);
        Some(res.start)
    }

    /// Handle the case where [`Space::acquire`] will not or cannot acquire pages from the page
    /// resource.  This may happen when
    /// -   GC is triggered and the allocation does not allow over-committing, or
    /// -   the allocation tried to acquire pages from the page resource but ran out of physical
    ///     memory.
    fn not_acquiring(
        &self,
        tls: VMThread,
        alloc_options: AllocationOptions,
        pr: &dyn PageResource<VM>,
        pages_reserved: usize,
        attempted_allocation_and_failed: bool,
    ) {
        assert!(
            VM::VMActivePlan::is_mutator(tls),
            "A non-mutator thread failed to get pages from the page resource.  \
            Copying GC plans should compute the copying headroom carefully to prevent this."
        );

        // Clear the request
        pr.clear_request(pages_reserved);

        // If we are not at a safepoint, return immediately.
        if !alloc_options.at_safepoint {
            return;
        }

        debug!("Collection required");

        if !self.common().global_state.is_initialized() {
            // Collection is not initialized, so we cannot trigger a GC to free memory. Panic.
            panic!(
                "GC is not allowed here: collection is not initialized \
                    (did you call initialize_collection()?).  \
                    Out of physical memory: {phy}",
                phy = attempted_allocation_and_failed
            );
        }

        if attempted_allocation_and_failed {
            // We thought we had memory to allocate, but somehow failed the allocation. Force a GC.
            let gc_performed = self.get_gc_trigger().poll(true, Some(self.as_space()));
            debug_assert!(gc_performed, "GC not performed when forced.");
        }

        // Inform the GC trigger about the pending allocation.
        let meta_pages_reserved = self.estimate_side_meta_pages(pages_reserved);
        let total_pages_reserved = pages_reserved + meta_pages_reserved;
        self.get_gc_trigger()
            .policy
            .on_pending_allocation(total_pages_reserved);

        VM::VMCollection::block_for_gc(VMMutatorThread(tls)); // We have checked that this is a mutator
    }

    fn address_in_space(&self, start: Address) -> bool {
        if !self.common().descriptor.is_contiguous() {
            self.common().vm_map().get_descriptor_for_address(start) == self.common().descriptor
        } else {
            start >= self.common().start && start < self.common().start + self.common().extent
        }
    }

    fn in_space(&self, object: ObjectReference) -> bool {
        self.address_in_space(object.to_raw_address())
    }

    /// This is called after we get a result from the page resource.  The space may
    /// tap into the hook to monitor heap growth.  The call is made from within the
    /// page resource's critical region, immediately before yielding the lock.
    ///
    /// Arguments:
    /// * `start`: The start of the newly allocated space.
    /// * `bytes`: The size of the newly allocated space.
    /// * `new_chunk`: `true` if the new space encroached upon or started a new chunk or chunks.
    fn grow_space(&self, start: Address, bytes: usize, new_chunk: bool) {
        trace!(
            "Grow space from {} for {} bytes (new chunk = {})",
            start,
            bytes,
            new_chunk
        );

        // If this is not a new chunk, the SFT for [start, start + bytes) should already be initialized.
        #[cfg(debug_assertions)]
        if !new_chunk {
            debug_assert!(
                SFT_MAP.get_checked(start).name() != EMPTY_SFT_NAME,
                "In grow_space(start = {}, bytes = {}, new_chunk = {}), we have empty SFT entries (chunk for {} = {})",
                start,
                bytes,
                new_chunk,
                start,
                SFT_MAP.get_checked(start).name()
            );
            debug_assert!(
                SFT_MAP.get_checked(start + bytes - 1).name() != EMPTY_SFT_NAME,
                "In grow_space(start = {}, bytes = {}, new_chunk = {}), we have empty SFT entries (chunk for {} = {})",
                start,
                bytes,
                new_chunk,
                start + bytes - 1,
                SFT_MAP.get_checked(start + bytes - 1).name()
            );
        }

        if new_chunk {
            unsafe { SFT_MAP.update(self.as_sft(), start, bytes) };
        }
    }

    /// Estimate the amount of side metadata memory needed for a given amount of data memory (in
    /// pages).  The result over-estimates the number of metadata pages needed, with at least one
    /// page per side metadata, but it is a reasonably accurate estimate of the number of side
    /// metadata pages the space actually consumes.
    ///
    /// This function is used for both triggering GC (via [`Space::reserved_pages`]) and resizing
    /// the heap (via [`crate::util::heap::GCTriggerPolicy::on_pending_allocation`]).
    fn estimate_side_meta_pages(&self, data_pages: usize) -> usize {
        self.common().metadata.calculate_reserved_pages(data_pages)
    }

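    /// Return the number of pages reserved by this space, i.e. the data pages reserved from the
    /// page resource plus an estimate of the side metadata pages needed for them.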
    fn reserved_pages(&self) -> usize {
        let data_pages = self.get_page_resource().reserved_pages();
        let meta_pages = self.estimate_side_meta_pages(data_pages);
        data_pages + meta_pages
    }

    /// Return the number of physical pages available.
    fn available_physical_pages(&self) -> usize {
        self.get_page_resource().get_available_physical_pages()
    }

    fn get_name(&self) -> &'static str {
        self.common().name
    }

    fn get_descriptor(&self) -> SpaceDescriptor {
        self.common().descriptor
    }

    fn common(&self) -> &CommonSpace<VM>;
    fn get_gc_trigger(&self) -> &GCTrigger<VM> {
        self.common().gc_trigger.as_ref()
    }

    fn release_multiple_pages(&mut self, start: Address);

    /// Which copy semantics we should use if we copy objects from this space.
    /// This is only needed for plans that use `SFTProcessEdges`.
    fn set_copy_for_sft_trace(&mut self, _semantics: Option<CopySemantics>) {
        panic!("A copying space should override this method")
    }

    /// Ensure that the current space's metadata context does not have any issues.
    /// Panics with a suitable message if any issue is detected.
    /// It also initialises the sanity maps, which will then be used if the `extreme_assertions` feature is active.
    /// Internally this calls verify_metadata_context() from `util::metadata::sanity`.
    ///
    /// This function is called once per space by its parent plan, but may be called multiple times per policy.
    ///
    /// Arguments:
    /// * `side_metadata_sanity_checker`: The `SideMetadataSanity` object instantiated in the calling plan.
    fn verify_side_metadata_sanity(&self, side_metadata_sanity_checker: &mut SideMetadataSanity) {
        side_metadata_sanity_checker
            .verify_metadata_context(std::any::type_name::<Self>(), &self.common().metadata)
    }

    /// Enumerate objects in the current space.
    ///
    /// Implementers can use the `enumerator` to report
    ///
    /// -   individual objects within the space using `enumerator.visit_object`, and
    /// -   ranges of addresses that may contain objects using `enumerator.visit_address_range`.
    ///     The caller will then enumerate objects in the range using the VO bits metadata.
    ///
    /// Each object in the space shall be covered by one of the two methods above.
    ///
    /// # Implementation considerations
    ///
    /// **Skipping empty ranges**: When enumerating address ranges, spaces can skip ranges (blocks,
    /// chunks, etc.) that are guaranteed not to contain objects.
    ///
    /// **Dynamic dispatch**: Because `Space` is a trait object type and `enumerator` is a `dyn`
    /// reference, invoking methods of `enumerator` involves dynamic dispatch.  But the overhead
    /// is acceptable if we call it one block at a time, because scanning the VO bits will
    /// dominate the execution time.  For LOS, it is cheaper to enumerate individual objects than
    /// to scan VO bits because the space is sparse.
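    ///
    /// # Example
    ///
    /// A minimal sketch (not part of mmtk-core) of how a block-based space might implement this
    /// method; `blocks()`, `is_empty()`, `start()` and `end()` are hypothetical helpers.
    ///
    /// ```ignore
    /// fn enumerate_objects(&self, enumerator: &mut dyn ObjectEnumerator) {
    ///     for block in self.blocks() {
    ///         // Skip blocks that are guaranteed to contain no objects.
    ///         if !block.is_empty() {
    ///             // The caller will enumerate objects in this range using the VO bits.
    ///             enumerator.visit_address_range(block.start(), block.end());
    ///         }
    ///     }
    /// }
    /// ```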
    fn enumerate_objects(&self, enumerator: &mut dyn ObjectEnumerator);

    fn set_allocate_as_live(&self, live: bool) {
        self.common()
            .allocate_as_live
            .store(live, std::sync::atomic::Ordering::SeqCst);
    }

    fn should_allocate_as_live(&self) -> bool {
        self.common()
            .allocate_as_live
            .load(std::sync::atomic::Ordering::Acquire)
    }

    /// Clear the side log bits for allocated regions in this space.
    /// This method is only called if the plan knows the log bits are side metadata.
    fn clear_side_log_bits(&self);

    /// Set the side log bits for allocated regions in this space.
    /// This method is only called if the plan knows the log bits are side metadata.
    fn set_side_log_bits(&self);
}

/// Print the VM map for a space.
///
/// `Space` needs to be object-safe, so it cannot have methods that use extra generic type
/// parameters; this method is therefore placed outside the `Space` trait.
/// It can be invoked on a `&dyn Space` (`space.as_space()` returns a `&dyn Space`).
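///
/// # Example
///
/// A sketch of typical usage: any `std::fmt::Write` sink works, e.g. a `String`.
///
/// ```ignore
/// let mut out = String::new();
/// print_vm_map(space.as_space(), &mut out).unwrap();
/// print!("{}", out);
/// ```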
#[allow(unused)]
pub(crate) fn print_vm_map<VM: VMBinding>(
    space: &dyn Space<VM>,
    out: &mut impl std::fmt::Write,
) -> Result<(), std::fmt::Error> {
    let common = space.common();
    write!(out, "{} ", common.name)?;
    if common.immortal {
        write!(out, "I")?;
    } else {
        write!(out, " ")?;
    }
    if common.movable {
        write!(out, " ")?;
    } else {
        write!(out, "N")?;
    }
    write!(out, " ")?;
    if common.contiguous {
        write!(
            out,
            "{}->{}",
            common.start,
            common.start + common.extent - 1
        )?;
        match common.vmrequest {
            VMRequest::Extent { extent, .. } => {
                write!(out, " E {}", extent)?;
            }
            VMRequest::Fraction { frac, .. } => {
                write!(out, " F {}", frac)?;
            }
            _ => {}
        }
    } else {
        let mut a = space
            .get_page_resource()
            .common()
            .get_head_discontiguous_region();
        while !a.is_zero() {
            write!(
                out,
                "{}->{}",
                a,
                a + space.common().vm_map().get_contiguous_region_size(a) - 1
            )?;
            a = space.common().vm_map().get_next_contiguous_region(a);
            if !a.is_zero() {
                write!(out, " ")?;
            }
        }
    }
    writeln!(out)?;

    Ok(())
}

impl_downcast!(Space<VM> where VM: VMBinding);

pub struct CommonSpace<VM: VMBinding> {
    pub name: &'static str,
    pub descriptor: SpaceDescriptor,
    pub vmrequest: VMRequest,

    /// For a copying space that allows sft_trace_object(), this should be set before each GC so we know
    /// the copy semantics for the space.
    pub copy: Option<CopySemantics>,

    pub immortal: bool,
    pub movable: bool,
    pub contiguous: bool,
    pub zeroed: bool,

    pub permission_exec: bool,

    pub start: Address,
    pub extent: usize,

    pub vm_map: &'static dyn VMMap,
    pub mmapper: &'static dyn Mmapper,

    pub(crate) metadata: SideMetadataContext,

    /// This field equals `needs_log_bit` in the plan constraints.
    // TODO: This should be a constant for performance.
    pub needs_log_bit: bool,
    pub unlog_allocated_object: bool,
    pub unlog_traced_object: bool,

    /// A lock used during acquire() to make sure only one thread can allocate.
    pub acquire_lock: Mutex<()>,

    pub gc_trigger: Arc<GCTrigger<VM>>,
    pub global_state: Arc<GlobalState>,
    pub options: Arc<Options>,

    pub allocate_as_live: AtomicBool,

    p: PhantomData<VM>,
}

/// Arguments passed from a policy to create a space. This includes policy-specific args.
pub struct PolicyCreateSpaceArgs<'a, VM: VMBinding> {
    pub plan_args: PlanCreateSpaceArgs<'a, VM>,
    pub movable: bool,
    pub immortal: bool,
    pub local_side_metadata_specs: Vec<SideMetadataSpec>,
}

/// Arguments passed from a plan to create a space.
pub struct PlanCreateSpaceArgs<'a, VM: VMBinding> {
    pub name: &'static str,
    pub zeroed: bool,
    pub permission_exec: bool,
    pub unlog_allocated_object: bool,
    pub unlog_traced_object: bool,
    pub vmrequest: VMRequest,
    pub global_side_metadata_specs: Vec<SideMetadataSpec>,
    pub vm_map: &'static dyn VMMap,
    pub mmapper: &'static dyn Mmapper,
    pub heap: &'a mut HeapMeta,
    pub constraints: &'a PlanConstraints,
    pub gc_trigger: Arc<GCTrigger<VM>>,
    pub scheduler: Arc<GCWorkScheduler<VM>>,
    pub options: Arc<Options>,
    pub global_state: Arc<GlobalState>,
}

impl<'a, VM: VMBinding> PlanCreateSpaceArgs<'a, VM> {
    /// Turn a `PlanCreateSpaceArgs` into a `PolicyCreateSpaceArgs`.
    pub fn into_policy_args(
        self,
        movable: bool,
        immortal: bool,
        policy_metadata_specs: Vec<SideMetadataSpec>,
    ) -> PolicyCreateSpaceArgs<'a, VM> {
        PolicyCreateSpaceArgs {
            movable,
            immortal,
            local_side_metadata_specs: policy_metadata_specs,
            plan_args: self,
        }
    }
}

impl<VM: VMBinding> CommonSpace<VM> {
    pub fn new(args: PolicyCreateSpaceArgs<VM>) -> Self {
        let mut rtn = CommonSpace {
            name: args.plan_args.name,
            descriptor: SpaceDescriptor::UNINITIALIZED,
            vmrequest: args.plan_args.vmrequest,
            copy: None,
            immortal: args.immortal,
            movable: args.movable,
            contiguous: true,
            permission_exec: args.plan_args.permission_exec,
            zeroed: args.plan_args.zeroed,
            start: unsafe { Address::zero() },
            extent: 0,
            vm_map: args.plan_args.vm_map,
            mmapper: args.plan_args.mmapper,
            needs_log_bit: args.plan_args.constraints.needs_log_bit,
            unlog_allocated_object: args.plan_args.unlog_allocated_object,
            unlog_traced_object: args.plan_args.unlog_traced_object,
            gc_trigger: args.plan_args.gc_trigger,
            metadata: SideMetadataContext {
                global: args.plan_args.global_side_metadata_specs,
                local: args.local_side_metadata_specs,
            },
            acquire_lock: Mutex::new(()),
            global_state: args.plan_args.global_state,
            options: args.plan_args.options.clone(),
            allocate_as_live: AtomicBool::new(false),
            p: PhantomData,
        };

        let vmrequest = args.plan_args.vmrequest;
        if vmrequest.is_discontiguous() {
            rtn.contiguous = false;
            // FIXME
            rtn.descriptor = SpaceDescriptor::create_descriptor();
            // VM.memory.setHeapRange(index, HEAP_START, HEAP_END);
            return rtn;
        }

        let (extent, top) = match vmrequest {
            VMRequest::Fraction { frac, top: _top } => (get_frac_available(frac), _top),
            VMRequest::Extent {
                extent: _extent,
                top: _top,
            } => (_extent, _top),
            VMRequest::Fixed {
                extent: _extent, ..
            } => (_extent, false),
            _ => unreachable!(),
        };

        assert!(
            extent == raw_align_up(extent, BYTES_IN_CHUNK),
            "{} requested non-aligned extent: {} bytes",
            rtn.name,
            extent
        );

        let start = if let VMRequest::Fixed { start: _start, .. } = vmrequest {
            _start
        } else {
            // FIXME
            //if (HeapLayout.vmMap.isFinalized()) VM.assertions.fail("heap is narrowed after regionMap is finalized: " + name);
            args.plan_args.heap.reserve(extent, top)
        };
        assert!(
            start == chunk_align_up(start),
            "{} starting on non-aligned boundary: {}",
            rtn.name,
            start
        );

        rtn.contiguous = true;
        rtn.start = start;
        rtn.extent = extent;
        // FIXME
        rtn.descriptor = SpaceDescriptor::create_descriptor_from_heap_range(start, start + extent);
        // VM.memory.setHeapRange(index, start, start.plus(extent));

        // We only insert the space into our VM map if the range of the space is in our available heap range.
        // Normal spaces are definitely in our heap range. But for the VM space, a runtime could give us an
        // arbitrary range, so we only insert into our VM map if the range overlaps with our heap.
        {
            use crate::util::heap::layout;
            let overlap =
                Address::range_intersection(&(start..start + extent), &layout::available_range());
            if !overlap.is_empty() {
                args.plan_args.vm_map.insert(
                    overlap.start,
                    overlap.end - overlap.start,
                    rtn.descriptor,
                );
            }
        }

        debug!(
            "Created space {} [{}, {}) for {} bytes",
            rtn.name,
            start,
            start + extent,
            extent
        );

        rtn
    }

    pub fn initialize_sft(
        &self,
        sft: &(dyn SFT + Sync + 'static),
        sft_map: &mut dyn crate::policy::sft_map::SFTMap,
    ) {
        // We have to keep this for now: if a space is contiguous, our page resource will NOT consider newly
        // allocated chunks as new chunks (i.e. it will not return new_chunk = true). In that case, grow_space()
        // will not set the SFT because new_chunk is false.
        // We can fix this by either of these:
        // * fix the page resource, so it properly returns new_chunk
        // * change grow_space() so it sets the SFT no matter what the new_chunk value is.
        // FIXME: eagerly initializing SFT is not a good idea.
        if self.contiguous {
            unsafe { sft_map.eager_initialize(sft, self.start, self.extent) };
        }
    }

    pub fn vm_map(&self) -> &'static dyn VMMap {
        self.vm_map
    }

    pub fn mmap_protection(&self) -> MmapProtection {
        if self.permission_exec || cfg!(feature = "exec_permission_on_all_spaces") {
            MmapProtection::ReadWriteExec
        } else {
            MmapProtection::ReadWrite
        }
    }

    pub(crate) fn debug_print_object_global_info(&self, object: ObjectReference) {
        #[cfg(feature = "vo_bit")]
        println!(
            "vo bit = {}",
            crate::util::metadata::vo_bit::is_vo_bit_set(object)
        );
        if self.needs_log_bit {
            use crate::vm::object_model::ObjectModel;
            use std::sync::atomic::Ordering;
            println!(
                "log bit = {}",
                VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.is_unlogged::<VM>(object, Ordering::Relaxed),
            );
        }
        println!("is live = {}", object.is_live());
    }
}

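/// Compute the number of bytes corresponding to `frac` of the available address range, rounded
/// down to a whole number of megabytes and then up to a whole number of chunks.
///
/// A worked example (illustrative, not from the source): with `frac = 0.5` and 1 GiB available,
/// `bytes` is 512 MiB, which is already megabyte- and chunk-aligned, so the result is 512 MiB.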
fn get_frac_available(frac: f32) -> usize {
    trace!("AVAILABLE_START={}", vm_layout().available_start());
    trace!("AVAILABLE_END={}", vm_layout().available_end());
    let bytes = (frac * vm_layout().available_bytes() as f32) as usize;
    trace!("bytes={}*{}={}", frac, vm_layout().available_bytes(), bytes);
    let mb = bytes >> LOG_BYTES_IN_MBYTE;
    let rtn = mb << LOG_BYTES_IN_MBYTE;
    trace!("rtn={}", rtn);
    let aligned_rtn = raw_align_up(rtn, BYTES_IN_CHUNK);
    trace!("aligned_rtn={}", aligned_rtn);
    aligned_rtn
}

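/// Return the number of chunks needed to cover `pages` pages, rounding up to a whole number of
/// chunks. For example (assuming 4 KiB pages and 4 MiB chunks): `required_chunks(1)` and
/// `required_chunks(1024)` both return 1, while `required_chunks(1025)` returns 2.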
pub fn required_chunks(pages: usize) -> usize {
    let extent = raw_align_up(pages_to_bytes(pages), BYTES_IN_CHUNK);
    extent >> LOG_BYTES_IN_CHUNK
}