// mmtk/policy/space.rs

1use crate::global_state::GlobalState;
2use crate::plan::PlanConstraints;
3use crate::scheduler::GCWorkScheduler;
4use crate::util::conversions::*;
5use crate::util::metadata::side_metadata::{
6    SideMetadataContext, SideMetadataSanity, SideMetadataSpec,
7};
8use crate::util::object_enum::ObjectEnumerator;
9use crate::util::Address;
10use crate::util::ObjectReference;
11
12use crate::util::heap::layout::vm_layout::{vm_layout, LOG_BYTES_IN_CHUNK};
13use crate::util::heap::{PageResource, VMRequest};
14use crate::util::options::Options;
15use crate::vm::{ActivePlan, Collection};
16
17use crate::util::constants::LOG_BYTES_IN_MBYTE;
18use crate::util::conversions;
19use crate::util::opaque_pointer::*;
20
21use crate::mmtk::SFT_MAP;
22#[cfg(debug_assertions)]
23use crate::policy::sft::EMPTY_SFT_NAME;
24use crate::policy::sft::SFT;
25use crate::util::alloc::allocator::AllocationOptions;
26use crate::util::copy::*;
27use crate::util::heap::gc_trigger::GCTrigger;
28use crate::util::heap::layout::vm_layout::BYTES_IN_CHUNK;
29use crate::util::heap::layout::Mmapper;
30use crate::util::heap::layout::VMMap;
31use crate::util::heap::space_descriptor::SpaceDescriptor;
32use crate::util::heap::HeapMeta;
33use crate::util::os::*;
34use crate::vm::VMBinding;
35
36use std::marker::PhantomData;
37use std::sync::atomic::AtomicBool;
38use std::sync::Arc;
39use std::sync::Mutex;
40
41use downcast_rs::Downcast;
42
/// A space is a region of virtual memory managed as a whole by a single policy.
/// This trait defines behavior shared by all spaces, most importantly acquiring
/// pages from the page resource (`acquire`) and keeping the SFT map and VM map
/// consistent with the memory the space owns.
pub trait Space<VM: VMBinding>: 'static + SFT + Sync + Downcast {
    /// Upcast `self` to a `&dyn Space<VM>` trait object.
    fn as_space(&self) -> &dyn Space<VM>;
    /// Get the space-function-table (SFT) view of this space.
    fn as_sft(&self) -> &(dyn SFT + Sync + 'static);
    /// Get the page resource backing this space.
    fn get_page_resource(&self) -> &dyn PageResource<VM>;

    /// Get a mutable reference to the underlying page resource, or `None` if the space does not
    /// have a page resource.
    fn maybe_get_page_resource_mut(&mut self) -> Option<&mut dyn PageResource<VM>>;

    /// Initialize entries in SFT map for the space. This is called when the Space object
    /// has a non-moving address, as we will use the address to set sft.
    /// Currently after we create a boxed plan, spaces in the plan have a non-moving address.
    fn initialize_sft(&self, sft_map: &mut dyn crate::policy::sft_map::SFTMap);

    /// Acquire `pages` pages of memory from this space for thread `tls`.
    ///
    /// If `tls` is a mutator thread, this may poll the GC trigger and thus trigger a GC
    /// instead of allocating.  Returns the start address of the acquired memory, or
    /// `Address::ZERO` if no memory was acquired (GC was triggered, or the page resource
    /// failed to provide pages).
    fn acquire(&self, tls: VMThread, pages: usize, alloc_options: AllocationOptions) -> Address {
        trace!(
            "Space.acquire, tls={:?}, alloc_options={:?}",
            tls,
            alloc_options
        );

        debug_assert!(
            !self.get_gc_trigger().will_oom_on_alloc(pages << crate::util::constants::LOG_BYTES_IN_PAGE),
            "The requested pages is larger than the max heap size. Is will_go_oom_on_acquire used before acquring memory?"
        );

        trace!("Reserving pages");
        let pr = self.get_page_resource();
        let pages_reserved = pr.reserve_pages(pages);
        trace!("Pages reserved");

        // Should we poll before acquiring pages from page resources so that it can trigger a GC?
        // - If tls is collector, we cannot attempt a GC.
        let should_poll = VM::VMActivePlan::is_mutator(tls);

        // If we should poll, do it now.  Record if it has triggered a GC.
        // If we should not poll, GC is not triggered.
        let gc_triggered = should_poll && {
            trace!("Polling ..");
            self.get_gc_trigger().poll(false, Some(self.as_space()))
        };

        // We can try to get pages if
        // - GC is not triggered, or
        // - GC is triggered, but we allow over-committing.
        let should_get_pages = !gc_triggered || alloc_options.allow_overcommit;

        // Get new pages if we should. If we didn't get new pages from the page resource for any
        // reason (if we decided not to, or if we tried and failed), this function shall return a
        // null address.
        if should_get_pages {
            if let Some(addr) = self.get_new_pages_and_initialize(tls, pages, pr, pages_reserved) {
                addr
            } else {
                self.not_acquiring(tls, alloc_options, pr, pages_reserved, true);
                Address::ZERO
            }
        } else {
            self.not_acquiring(tls, alloc_options, pr, pages_reserved, false);
            Address::ZERO
        }
    }

    /// Get new pages from the page resource, and do necessary initialization, including mmapping
    /// and zeroing the memory.
    ///
    /// The caller must have reserved pages from the page resource.  If successfully acquired pages
    /// from the page resource, the reserved pages will be committed.
    ///
    /// Returns `None` if failed to acquire memory from the page resource.  The caller should call
    /// `pr.clear_request`.
    fn get_new_pages_and_initialize(
        &self,
        tls: VMThread,
        pages: usize,
        pr: &dyn PageResource<VM>,
        pages_reserved: usize,
    ) -> Option<Address> {
        // We need this lock: Otherwise, it is possible that one thread acquires pages in a new chunk, but not yet
        // set SFT for it (in grow_space()), and another thread acquires pages in the same chunk, which is not
        // a new chunk so grow_space() won't be called on it. The second thread could return a result in the chunk before
        // its SFT is properly set.
        // We need to minimize the scope of this lock for performance when we have many threads (mutator threads, or GC threads with copying allocators).
        // See: https://github.com/mmtk/mmtk-core/issues/610
        let lock = self.common().acquire_lock.lock().unwrap();

        let Ok(res) = pr.get_new_pages(self.common().descriptor, pages_reserved, pages, tls) else {
            return None;
        };

        debug!(
            "Got new pages {} ({} pages) for {} in chunk {}, new_chunk? {}",
            res.start,
            res.pages,
            self.get_name(),
            conversions::chunk_align_down(res.start),
            res.new_chunk
        );
        let bytes = conversions::pages_to_bytes(res.pages);

        let mmap = || {
            // Mmap the pages and the side metadata, and handle error. In case of any error,
            // we will either call back to the VM for OOM, or simply panic.
            if let Err(mmap_error) = self
                .common()
                .mmapper
                .ensure_mapped(
                    res.start,
                    res.pages,
                    if *self.common().options.transparent_hugepages {
                        HugePageSupport::TransparentHugePages
                    } else {
                        HugePageSupport::No
                    },
                    self.common().mmap_protection(),
                    &MmapAnnotation::Space {
                        name: self.get_name(),
                    },
                )
                .and(self.common().metadata.try_map_metadata_space(
                    res.start,
                    bytes,
                    self.get_name(),
                ))
            {
                OS::handle_mmap_error::<VM>(mmap_error, tls);
            }
        };
        let grow_space = || {
            self.grow_space(res.start, bytes, res.new_chunk);
        };

        // The scope of the lock is important in terms of performance when we have many allocator threads.
        if SFT_MAP.get_side_metadata().is_some() {
            // The SFT map uses side metadata, so we have to initialize the side metadata first,
            mmap();
            // then grow space, which will use the side metadata we mapped above,
            grow_space();
            // then we can drop the lock after grow_space().
            drop(lock);
        } else {
            // In normal cases, we can drop lock immediately after grow_space()
            grow_space();
            drop(lock);
            // and map side metadata without holding the lock
            mmap();
        }

        // TODO: Concurrent zeroing
        if self.common().zeroed {
            crate::util::memory::zero(res.start, bytes);
        }

        // Some assertions
        {
            // --- Assert the start of the allocated region ---
            // The start address SFT should be correct.
            debug_assert_eq!(SFT_MAP.get_checked(res.start).name(), self.get_name());
            // The start address is in our space.
            debug_assert!(self.address_in_space(res.start));
            // The descriptor should be correct.
            debug_assert_eq!(
                self.common().vm_map().get_descriptor_for_address(res.start),
                self.common().descriptor
            );

            // --- Assert the last byte in the allocated region ---
            let last_byte = res.start + bytes - 1;
            // The SFT for the last byte in the allocated memory should be correct.
            debug_assert_eq!(SFT_MAP.get_checked(last_byte).name(), self.get_name());
            // The last byte in the allocated memory should be in this space.
            debug_assert!(self.address_in_space(last_byte));
            // The descriptor for the last byte should be correct.
            debug_assert_eq!(
                self.common().vm_map().get_descriptor_for_address(last_byte),
                self.common().descriptor
            );
        }

        debug!("Space.acquire(), returned = {}", res.start);
        Some(res.start)
    }

    /// Handle the case where [`Space::acquire`] will not or can not acquire pages from the page
    /// resource.  This may happen when
    /// -   GC is triggered and the allocation does not allow over-committing, or
    /// -   the allocation tried to acquire pages from the page resource but ran out of physical
    ///     memory.
    fn not_acquiring(
        &self,
        tls: VMThread,
        alloc_options: AllocationOptions,
        pr: &dyn PageResource<VM>,
        pages_reserved: usize,
        attempted_allocation_and_failed: bool,
    ) {
        // Only a mutator can reach here: collectors must never fail to get pages.
        assert!(
            VM::VMActivePlan::is_mutator(tls),
            "A non-mutator thread failed to get pages from page resource.  \
            Copying GC plans should compute the copying headroom carefully to prevent this."
        );

        // Clear the request
        pr.clear_request(pages_reserved);

        // If we are not at a safepoint, return immediately.
        if !alloc_options.at_safepoint {
            return;
        }

        debug!("Collection required");

        if !self.common().global_state.is_initialized() {
            // Otherwise do GC here
            panic!(
                "GC is not allowed here: collection is not initialized \
                    (did you call initialize_collection()?).  \
                    Out of physical memory: {phy}",
                phy = attempted_allocation_and_failed
            );
        }

        if attempted_allocation_and_failed {
            // We thought we had memory to allocate, but somehow failed the allocation. Will force a GC.
            let gc_performed = self.get_gc_trigger().poll(true, Some(self.as_space()));
            debug_assert!(gc_performed, "GC not performed when forced.");
        }

        // Inform GC trigger about the pending allocation.
        let meta_pages_reserved = self.estimate_side_meta_pages(pages_reserved);
        let total_pages_reserved = pages_reserved + meta_pages_reserved;
        self.get_gc_trigger()
            .policy
            .on_pending_allocation(total_pages_reserved);

        VM::VMCollection::block_for_gc(VMMutatorThread(tls)); // We have checked that this is mutator
    }

    /// Return `true` if `start` is an address inside this space.  For a contiguous space
    /// this is a range check against `[start, start + extent)`; for a discontiguous space
    /// it consults the VM map's descriptor for the address.
    fn address_in_space(&self, start: Address) -> bool {
        if !self.common().descriptor.is_contiguous() {
            self.common().vm_map().get_descriptor_for_address(start) == self.common().descriptor
        } else {
            start >= self.common().start && start < self.common().start + self.common().extent
        }
    }

    /// Return `true` if the object's raw address is inside this space.
    fn in_space(&self, object: ObjectReference) -> bool {
        self.address_in_space(object.to_raw_address())
    }

    /// This is called after we get result from page resources.  The space may
    /// tap into the hook to monitor heap growth.  The call is made from within the
    /// page resources' critical region, immediately before yielding the lock.
    ///
    /// Arguments:
    /// * `start`: The start of the newly allocated space.
    /// * `bytes`: The size of the newly allocated space.
    /// * `new_chunk`: `true` if the new space encroached upon or started a new chunk or chunks.
    fn grow_space(&self, start: Address, bytes: usize, new_chunk: bool) {
        trace!(
            "Grow space from {} for {} bytes (new chunk = {})",
            start,
            bytes,
            new_chunk
        );

        // If this is not a new chunk, the SFT for [start, start + bytes) should already be initialized.
        #[cfg(debug_assertions)]
        if !new_chunk {
            debug_assert!(
                SFT_MAP.get_checked(start).name() != EMPTY_SFT_NAME,
                "In grow_space(start = {}, bytes = {}, new_chunk = {}), we have empty SFT entries (chunk for {} = {})",
                start,
                bytes,
                new_chunk,
                start,
                SFT_MAP.get_checked(start).name()
            );
            debug_assert!(
                SFT_MAP.get_checked(start + bytes - 1).name() != EMPTY_SFT_NAME,
                "In grow_space(start = {}, bytes = {}, new_chunk = {}), we have empty SFT entries (chunk for {} = {})",
                start,
                bytes,
                new_chunk,
                start + bytes - 1,
                SFT_MAP.get_checked(start + bytes - 1).name()
            );
        }

        if new_chunk {
            // SAFETY: per SFTMap::update's contract, we only update SFT entries for chunks
            // newly acquired by this space (new_chunk is true), under acquire_lock.
            unsafe { SFT_MAP.update(self.as_sft(), start, bytes) };
        }
    }

    /// Estimate the amount of side metadata memory needed for a given data memory size in pages.
    /// The result will over-estimate the amount of metadata pages needed, with at least one page
    /// per side metadata.  This relatively accurately describes the number of side metadata pages
    /// the space actually consumes.
    ///
    /// This function is used for both triggering GC (via [`Space::reserved_pages`]) and resizing
    /// the heap (via [`crate::util::heap::GCTriggerPolicy::on_pending_allocation`]).
    fn estimate_side_meta_pages(&self, data_pages: usize) -> usize {
        self.common().metadata.calculate_reserved_pages(data_pages)
    }

    /// Total pages reserved by this space: data pages reserved by the page resource plus the
    /// estimated side metadata pages for that data.
    fn reserved_pages(&self) -> usize {
        let data_pages = self.get_page_resource().reserved_pages();
        let meta_pages = self.estimate_side_meta_pages(data_pages);
        data_pages + meta_pages
    }

    /// Return the number of physical pages available.
    fn available_physical_pages(&self) -> usize {
        self.get_page_resource().get_available_physical_pages()
    }

    /// Get the name of the space.
    fn get_name(&self) -> &'static str {
        self.common().name
    }

    /// Get the descriptor identifying this space in the VM map.
    fn get_descriptor(&self) -> SpaceDescriptor {
        self.common().descriptor
    }

    /// Get the `CommonSpace` fields shared by all spaces.
    fn common(&self) -> &CommonSpace<VM>;
    /// Get the GC trigger used by this space's plan.
    fn get_gc_trigger(&self) -> &GCTrigger<VM> {
        self.common().gc_trigger.as_ref()
    }

    /// Release a multi-page allocation starting at `start` back to the space.
    fn release_multiple_pages(&mut self, start: Address);

    /// What copy semantic we should use for this space if we copy objects from this space.
    /// This is only needed for plans that use SFTProcessEdges
    fn set_copy_for_sft_trace(&mut self, _semantics: Option<CopySemantics>) {
        panic!("A copying space should override this method")
    }

    /// Ensure that the current space's metadata context does not have any issues.
    /// Panics with a suitable message if any issue is detected.
    /// It also initialises the sanity maps which will then be used if the `extreme_assertions` feature is active.
    /// Internally this calls verify_metadata_context() from `util::metadata::sanity`
    ///
    /// This function is called once per space by its parent plan but may be called multiple times per policy.
    ///
    /// Arguments:
    /// * `side_metadata_sanity_checker`: The `SideMetadataSanity` object instantiated in the calling plan.
    fn verify_side_metadata_sanity(&self, side_metadata_sanity_checker: &mut SideMetadataSanity) {
        side_metadata_sanity_checker
            .verify_metadata_context(std::any::type_name::<Self>(), &self.common().metadata)
    }

    /// Enumerate objects in the current space.
    ///
    /// Implementers can use the `enumerator` to report
    ///
    /// -   individual objects within the space using `enumerator.visit_object`, and
    /// -   ranges of address that may contain objects using `enumerator.visit_address_range`. The
    ///     caller will then enumerate objects in the range using the VO bits metadata.
    ///
    /// Each object in the space shall be covered by one of the two methods above.
    ///
    /// # Implementation considerations
    ///
    /// **Skipping empty ranges**: When enumerating address ranges, spaces can skip ranges (blocks,
    /// chunks, etc.) that are guaranteed not to contain objects.
    ///
    /// **Dynamic dispatch**: Because `Space` is a trait object type and `enumerator` is a `dyn`
    /// reference, invoking methods of `enumerator` involves a dynamic dispatching.  But the
    /// overhead is OK if we call it a block at a time because scanning the VO bits will dominate
    /// the execution time.  For LOS, it will be cheaper to enumerate individual objects than
    /// scanning VO bits because it is sparse.
    fn enumerate_objects(&self, enumerator: &mut dyn ObjectEnumerator);

    /// Set whether objects allocated in this space should be treated as live on allocation.
    fn set_allocate_as_live(&self, live: bool) {
        self.common()
            .allocate_as_live
            .store(live, std::sync::atomic::Ordering::SeqCst);
    }

    /// Return whether objects allocated in this space should be treated as live on allocation.
    // NOTE(review): the store above uses SeqCst while this load uses Acquire — confirm the
    // asymmetry is intentional.
    fn should_allocate_as_live(&self) -> bool {
        self.common()
            .allocate_as_live
            .load(std::sync::atomic::Ordering::Acquire)
    }

    /// Clear the side log bits for allocated regions in this space.
    /// This method is only called if the plan knows the log bits are side metadata.
    fn clear_side_log_bits(&self);

    /// Set the side log bits for allocated regions in this space.
    /// This method is only called if the plan knows the log bits are side metadata.
    fn set_side_log_bits(&self);
}
437
438/// Print the VM map for a space.
439/// Space needs to be object-safe, so it cannot have methods that use extra generic type paramters. So this method is placed outside the Space trait.
440/// This method can be invoked on a &dyn Space (space.as_space() will return &dyn Space).
441#[allow(unused)]
442pub(crate) fn print_vm_map<VM: VMBinding>(
443    space: &dyn Space<VM>,
444    out: &mut impl std::fmt::Write,
445) -> Result<(), std::fmt::Error> {
446    let common = space.common();
447    write!(out, "{} ", common.name)?;
448    if common.immortal {
449        write!(out, "I")?;
450    } else {
451        write!(out, " ")?;
452    }
453    if common.movable {
454        write!(out, " ")?;
455    } else {
456        write!(out, "N")?;
457    }
458    write!(out, " ")?;
459    if common.contiguous {
460        write!(
461            out,
462            "{}->{}",
463            common.start,
464            common.start + common.extent - 1
465        )?;
466        match common.vmrequest {
467            VMRequest::Extent { extent, .. } => {
468                write!(out, " E {}", extent)?;
469            }
470            VMRequest::Fraction { frac, .. } => {
471                write!(out, " F {}", frac)?;
472            }
473            _ => {}
474        }
475    } else {
476        let mut a = space
477            .get_page_resource()
478            .common()
479            .get_head_discontiguous_region();
480        while !a.is_zero() {
481            write!(
482                out,
483                "{}->{}",
484                a,
485                a + space.common().vm_map().get_contiguous_region_size(a) - 1
486            )?;
487            a = space.common().vm_map().get_next_contiguous_region(a);
488            if !a.is_zero() {
489                write!(out, " ")?;
490            }
491        }
492    }
493    writeln!(out)?;
494
495    Ok(())
496}
497
498impl_downcast!(Space<VM> where VM: VMBinding);
499
/// Fields shared by all spaces.  Each concrete space embeds one `CommonSpace` and exposes
/// it via [`Space::common`].
pub struct CommonSpace<VM: VMBinding> {
    /// Human-readable name of the space.
    pub name: &'static str,
    /// Descriptor identifying this space in the VM map.
    pub descriptor: SpaceDescriptor,
    /// The virtual-memory request this space was created with.
    pub vmrequest: VMRequest,

    /// For a copying space that allows sft_trace_object(), this should be set before each GC so we know
    /// the copy semantics for the space.
    pub copy: Option<CopySemantics>,

    // Space properties: whether objects are never collected, whether objects may move,
    // whether the address range is contiguous, and whether acquired memory is zeroed.
    pub immortal: bool,
    pub movable: bool,
    pub contiguous: bool,
    pub zeroed: bool,

    /// Whether memory in this space is mapped with execute permission.
    pub permission_exec: bool,

    // Start address and extent; set in `CommonSpace::new` only for contiguous spaces
    // (discontiguous spaces keep `start` zero and `extent` 0).
    pub start: Address,
    pub extent: usize,

    /// The global VM map recording which space owns which region.
    pub vm_map: &'static dyn VMMap,
    /// The mmapper used to map memory for this space.
    pub mmapper: &'static dyn Mmapper,

    /// The side metadata (global and policy-local specs) for this space.
    pub(crate) metadata: SideMetadataContext,

    /// This field equals to needs_log_bit in the plan constraints.
    // TODO: This should be a constant for performance.
    pub needs_log_bit: bool,
    pub unlog_allocated_object: bool,
    pub unlog_traced_object: bool,

    /// A lock used during acquire() to make sure only one thread can allocate.
    pub acquire_lock: Mutex<()>,

    pub gc_trigger: Arc<GCTrigger<VM>>,
    pub global_state: Arc<GlobalState>,
    pub options: Arc<Options>,

    /// When true, objects allocated in this space are treated as live on allocation.
    pub allocate_as_live: AtomicBool,

    // Marker tying this struct to the VM binding type without storing a VM value.
    p: PhantomData<VM>,
}
541
542/// Arguments passed from a policy to create a space. This includes policy specific args.
/// Arguments passed from a policy to create a space. This includes policy specific args.
pub struct PolicyCreateSpaceArgs<'a, VM: VMBinding> {
    /// The plan-level arguments this policy-level request wraps.
    pub plan_args: PlanCreateSpaceArgs<'a, VM>,
    /// Whether objects in the space may move.
    pub movable: bool,
    /// Whether objects in the space are never collected.
    pub immortal: bool,
    /// Side metadata specs specific to this policy.
    pub local_side_metadata_specs: Vec<SideMetadataSpec>,
}
549
550/// Arguments passed from a plan to create a space.
/// Arguments passed from a plan to create a space.
pub struct PlanCreateSpaceArgs<'a, VM: VMBinding> {
    /// Name of the space to create.
    pub name: &'static str,
    /// Whether acquired memory should be zeroed.
    pub zeroed: bool,
    /// Whether memory should be mapped with execute permission.
    pub permission_exec: bool,
    pub unlog_allocated_object: bool,
    pub unlog_traced_object: bool,
    /// The virtual-memory request (extent / fraction / fixed range).
    pub vmrequest: VMRequest,
    /// Side metadata specs shared across all spaces in the plan.
    pub global_side_metadata_specs: Vec<SideMetadataSpec>,
    pub vm_map: &'static dyn VMMap,
    pub mmapper: &'static dyn Mmapper,
    /// Heap metadata used to reserve the space's address range.
    pub heap: &'a mut HeapMeta,
    /// Constraints of the plan creating the space.
    pub constraints: &'a PlanConstraints,
    pub gc_trigger: Arc<GCTrigger<VM>>,
    pub scheduler: Arc<GCWorkScheduler<VM>>,
    pub options: Arc<Options>,
    pub global_state: Arc<GlobalState>,
}
568
569impl<'a, VM: VMBinding> PlanCreateSpaceArgs<'a, VM> {
570    /// Turning PlanCreateSpaceArgs into a PolicyCreateSpaceArgs
571    pub fn into_policy_args(
572        self,
573        movable: bool,
574        immortal: bool,
575        policy_metadata_specs: Vec<SideMetadataSpec>,
576    ) -> PolicyCreateSpaceArgs<'a, VM> {
577        PolicyCreateSpaceArgs {
578            movable,
579            immortal,
580            local_side_metadata_specs: policy_metadata_specs,
581            plan_args: self,
582        }
583    }
584}
585
impl<VM: VMBinding> CommonSpace<VM> {
    /// Create a `CommonSpace` from the policy-level creation arguments.
    ///
    /// For a discontiguous request, only the descriptor is created and the space has no
    /// fixed range.  For a contiguous request, this reserves (or uses the fixed) address
    /// range, inserts the range into the VM map, and maps side metadata for the range.
    pub fn new(args: PolicyCreateSpaceArgs<VM>) -> Self {
        let mut rtn = CommonSpace {
            name: args.plan_args.name,
            descriptor: SpaceDescriptor::UNINITIALIZED,
            vmrequest: args.plan_args.vmrequest,
            copy: None,
            immortal: args.immortal,
            movable: args.movable,
            contiguous: true,
            permission_exec: args.plan_args.permission_exec,
            zeroed: args.plan_args.zeroed,
            start: unsafe { Address::zero() },
            extent: 0,
            vm_map: args.plan_args.vm_map,
            mmapper: args.plan_args.mmapper,
            needs_log_bit: args.plan_args.constraints.needs_log_bit,
            unlog_allocated_object: args.plan_args.unlog_allocated_object,
            unlog_traced_object: args.plan_args.unlog_traced_object,
            gc_trigger: args.plan_args.gc_trigger,
            metadata: SideMetadataContext {
                global: args.plan_args.global_side_metadata_specs,
                local: args.local_side_metadata_specs,
            },
            acquire_lock: Mutex::new(()),
            global_state: args.plan_args.global_state,
            options: args.plan_args.options.clone(),
            allocate_as_live: AtomicBool::new(false),
            p: PhantomData,
        };

        let vmrequest = args.plan_args.vmrequest;
        if vmrequest.is_discontiguous() {
            // Discontiguous space: no fixed range; memory comes from the shared
            // discontiguous region pool.
            rtn.contiguous = false;
            // FIXME
            rtn.descriptor = SpaceDescriptor::create_descriptor();
            // VM.memory.setHeapRange(index, HEAP_START, HEAP_END);
            return rtn;
        }

        // Contiguous space: work out the extent and whether to allocate from the top
        // of the heap range.
        let (extent, top) = match vmrequest {
            VMRequest::Fraction { frac, top: _top } => (get_frac_available(frac), _top),
            VMRequest::Extent {
                extent: _extent,
                top: _top,
            } => (_extent, _top),
            VMRequest::Fixed {
                extent: _extent, ..
            } => (_extent, false),
            _ => unreachable!(),
        };

        assert!(
            extent == raw_align_up(extent, BYTES_IN_CHUNK),
            "{} requested non-aligned extent: {} bytes",
            rtn.name,
            extent
        );

        let start = if let VMRequest::Fixed { start: _start, .. } = vmrequest {
            _start
        } else {
            // FIXME
            //if (HeapLayout.vmMap.isFinalized()) VM.assertions.fail("heap is narrowed after regionMap is finalized: " + name);
            args.plan_args.heap.reserve(extent, top)
        };
        assert!(
            start == chunk_align_up(start),
            "{} starting on non-aligned boundary: {}",
            rtn.name,
            start
        );

        rtn.contiguous = true;
        rtn.start = start;
        rtn.extent = extent;
        // FIXME
        rtn.descriptor = SpaceDescriptor::create_descriptor_from_heap_range(start, start + extent);
        // VM.memory.setHeapRange(index, start, start.plus(extent));

        // We only initialize our vm map if the range of the space is in our available heap range. For normal spaces,
        // they are definitely in our heap range. But for VM space, a runtime could give us an arbitrary range. We only
        // insert into our vm map if the range overlaps with our heap.
        {
            use crate::util::heap::layout;
            let overlap =
                Address::range_intersection(&(start..start + extent), &layout::available_range());
            if !overlap.is_empty() {
                args.plan_args.vm_map.insert(
                    overlap.start,
                    overlap.end - overlap.start,
                    rtn.descriptor,
                );
            }
        }

        // For contiguous space, we know its address range so we reserve metadata memory for its range.
        rtn.metadata
            .try_map_metadata_address_range(rtn.start, rtn.extent, rtn.name)
            .unwrap_or_else(|e| {
                // TODO(Javad): handle meta space allocation failure
                panic!("failed to mmap meta memory: {e}");
            });

        debug!(
            "Created space {} [{}, {}) for {} bytes",
            rtn.name,
            start,
            start + extent,
            extent
        );

        rtn
    }

    /// Eagerly initialize SFT entries for the whole range of a contiguous space.
    /// No-op for discontiguous spaces (their SFT entries are set in `grow_space`).
    pub fn initialize_sft(
        &self,
        sft: &(dyn SFT + Sync + 'static),
        sft_map: &mut dyn crate::policy::sft_map::SFTMap,
    ) {
        // We have to keep this for now: if a space is contiguous, our page resource will NOT consider newly allocated chunks
        // as new chunks (new_chunks = true). In that case, in grow_space(), we do not set SFT when new_chunks = false.
        // We can fix this by either of these:
        // * fix page resource, so it properly returns new_chunk
        // * change grow_space() so it sets SFT no matter what the new_chunks value is.
        // FIXME: eagerly initializing SFT is not a good idea.
        if self.contiguous {
            // SAFETY: the space owns its entire contiguous range [start, start + extent),
            // so initializing SFT entries for that range is sound.
            unsafe { sft_map.eager_initialize(sft, self.start, self.extent) };
        }
    }

    /// Get the global VM map.
    pub fn vm_map(&self) -> &'static dyn VMMap {
        self.vm_map
    }

    /// The mmap protection for this space's memory: read+write, plus execute if
    /// requested by the space or forced by the `exec_permission_on_all_spaces` feature.
    pub fn mmap_protection(&self) -> MmapProtection {
        if self.permission_exec || cfg!(feature = "exec_permission_on_all_spaces") {
            MmapProtection::ReadWriteExec
        } else {
            MmapProtection::ReadWrite
        }
    }

    /// Print debug information common to all spaces (VO bit, log bit, liveness) for an object.
    pub(crate) fn debug_print_object_global_info(&self, object: ObjectReference) {
        #[cfg(feature = "vo_bit")]
        println!(
            "vo bit = {}",
            crate::util::metadata::vo_bit::is_vo_bit_set(object)
        );
        if self.needs_log_bit {
            use crate::vm::object_model::ObjectModel;
            use std::sync::atomic::Ordering;
            println!(
                "log bit = {}",
                VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.is_unlogged::<VM>(object, Ordering::Relaxed),
            );
        }
        println!("is live = {}", object.is_live());
    }
}
746
747fn get_frac_available(frac: f32) -> usize {
748    trace!("AVAILABLE_START={}", vm_layout().available_start());
749    trace!("AVAILABLE_END={}", vm_layout().available_end());
750    let bytes = (frac * vm_layout().available_bytes() as f32) as usize;
751    trace!("bytes={}*{}={}", frac, vm_layout().available_bytes(), bytes);
752    let mb = bytes >> LOG_BYTES_IN_MBYTE;
753    let rtn = mb << LOG_BYTES_IN_MBYTE;
754    trace!("rtn={}", rtn);
755    let aligned_rtn = raw_align_up(rtn, BYTES_IN_CHUNK);
756    trace!("aligned_rtn={}", aligned_rtn);
757    aligned_rtn
758}
759
760pub fn required_chunks(pages: usize) -> usize {
761    let extent = raw_align_up(pages_to_bytes(pages), BYTES_IN_CHUNK);
762    extent >> LOG_BYTES_IN_CHUNK
763}