mmtk/mmtk.rs

//! MMTk instance.
use crate::global_state::{GcStatus, GlobalState};
use crate::plan::CreateGeneralPlanArgs;
use crate::plan::Plan;
use crate::policy::sft_map::{create_sft_map, SFTMap};
use crate::scheduler::GCWorkScheduler;

#[cfg(feature = "vo_bit")]
use crate::util::address::ObjectReference;
#[cfg(feature = "analysis")]
use crate::util::analysis::AnalysisManager;
use crate::util::finalizable_processor::FinalizableProcessor;
use crate::util::heap::gc_trigger::GCTrigger;
use crate::util::heap::layout::heap_parameters::MAX_SPACES;
use crate::util::heap::layout::vm_layout::{vm_layout, VMLayout};
use crate::util::heap::layout::{self, Mmapper, VMMap};
use crate::util::heap::HeapMeta;
use crate::util::opaque_pointer::*;
use crate::util::options::Options;
use crate::util::reference_processor::ReferenceProcessors;
#[cfg(feature = "sanity")]
use crate::util::sanity::sanity_checker::SanityChecker;
#[cfg(feature = "extreme_assertions")]
use crate::util::slot_logger::SlotLogger;
use crate::util::statistics::stats::Stats;
use crate::vm::ReferenceGlue;
use crate::vm::VMBinding;
use std::cell::UnsafeCell;
use std::collections::HashMap;
use std::default::Default;
#[cfg(feature = "sanity")]
use std::sync::atomic::AtomicBool;
use std::sync::atomic::Ordering;
use std::sync::Arc;
use std::sync::Mutex;

lazy_static! {
    // I am not sure if we should include these mmappers as part of the MMTK struct.
    // The considerations are:
    // 1. We need VMMap and Mmapper to create spaces. It is natural that the mappers are not
    //    part of MMTK, as creating MMTK requires these mappers. We could use Rc/Arc for these mappers though.
    // 2. These mmappers are possibly global across multiple MMTk instances, as they manage the
    //    entire address space.
    // TODO: We should refactor this when we know more about how multiple MMTK instances work.

    /// A global VMMap that manages the mapping of spaces to virtual memory ranges.
    pub static ref VM_MAP: Box<dyn VMMap + Send + Sync> = layout::create_vm_map();

    /// A global Mmapper for mmapping and protecting virtual memory.
    pub static ref MMAPPER: Box<dyn Mmapper> = layout::create_mmapper();
}

use crate::util::rust_util::InitializeOnce;

/// A global space function table that allows efficient dispatch of space-specific code for addresses in our heap.
pub static SFT_MAP: InitializeOnce<Box<dyn SFTMap>> = InitializeOnce::new();

/// MMTk builder. This is used to set options and other settings before actually creating an MMTk instance.
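///
/// A minimal usage sketch (`MyVM` stands for the binding's `VMBinding`
/// implementation and is not defined in this crate):
///
/// ```ignore
/// let mut builder = MMTKBuilder::new();
/// // Options are set by name/value strings; `set_option` returns false on invalid input.
/// assert!(builder.set_option("threads", "4"));
/// let mmtk: MMTK<MyVM> = builder.build();
/// ```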
pub struct MMTKBuilder {
    /// The options for this instance.
    pub options: Options,
}

impl MMTKBuilder {
    /// Create an MMTK builder with options read from environment variables, or using built-in
    /// defaults if not overridden by environment variables.
    pub fn new() -> Self {
        let mut builder = Self::new_no_env_vars();
        builder.options.read_env_var_settings();
        builder
    }

    /// Create an MMTK builder with built-in default options, but without reading options from
    /// environment variables.
    pub fn new_no_env_vars() -> Self {
        MMTKBuilder {
            options: Options::default(),
        }
    }

    /// Set an option.
    pub fn set_option(&mut self, name: &str, val: &str) -> bool {
        self.options.set_from_string(name, val)
    }

    /// Set multiple options by a string. The string should be key-value pairs separated by whitespace,
    /// such as `threads=1 stress_factor=4096`.
    pub fn set_options_bulk_by_str(&mut self, options: &str) -> bool {
        self.options.set_bulk_from_string(options)
    }

    /// Custom VM layout constants. VM bindings may use this function for compressed or 39-bit heap support.
    /// This function must be called before `MMTK::new()`.
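    ///
    /// A sketch of the intended call order (`custom_layout` stands for a
    /// `VMLayout` value prepared by the binding; its construction is omitted here):
    ///
    /// ```ignore
    /// let mut builder = MMTKBuilder::new();
    /// builder.set_vm_layout(custom_layout); // must happen before build()
    /// let mmtk: MMTK<MyVM> = builder.build();
    /// ```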
    pub fn set_vm_layout(&mut self, constants: VMLayout) {
        VMLayout::set_custom_vm_layout(constants)
    }

    /// Build an MMTk instance from the builder.
    pub fn build<VM: VMBinding>(&self) -> MMTK<VM> {
        MMTK::new(Arc::new(self.options.clone()))
    }
}

impl Default for MMTKBuilder {
    fn default() -> Self {
        Self::new()
    }
}

/// An MMTk instance. MMTk allows multiple instances to run independently; each instance gives users a separate heap.
/// *Note that running multiple instances is not fully supported yet.*
pub struct MMTK<VM: VMBinding> {
    pub(crate) options: Arc<Options>,
    pub(crate) state: Arc<GlobalState>,
    pub(crate) plan: UnsafeCell<Box<dyn Plan<VM = VM>>>,
    pub(crate) reference_processors: ReferenceProcessors,
    pub(crate) finalizable_processor:
        Mutex<FinalizableProcessor<<VM::VMReferenceGlue as ReferenceGlue<VM>>::FinalizableType>>,
    pub(crate) scheduler: Arc<GCWorkScheduler<VM>>,
    #[cfg(feature = "sanity")]
    pub(crate) sanity_checker: Mutex<SanityChecker<VM::VMSlot>>,
    #[cfg(feature = "extreme_assertions")]
    pub(crate) slot_logger: SlotLogger<VM::VMSlot>,
    pub(crate) gc_trigger: Arc<GCTrigger<VM>>,
    pub(crate) stats: Arc<Stats>,
    #[cfg(feature = "sanity")]
    inside_sanity: AtomicBool,
    /// Analysis counters. The `analysis` feature allows us to periodically stop the world and collect some statistics.
    #[cfg(feature = "analysis")]
    pub(crate) analysis_manager: Arc<AnalysisManager<VM>>,
}

unsafe impl<VM: VMBinding> Sync for MMTK<VM> {}
unsafe impl<VM: VMBinding> Send for MMTK<VM> {}

impl<VM: VMBinding> MMTK<VM> {
    /// Create an MMTK instance. This is not public. Bindings should use [`MMTKBuilder::build`].
    pub(crate) fn new(options: Arc<Options>) -> Self {
        // Verify that the Mmapper can handle the required address space size.
        vm_layout().validate_address_space();

        // Initialize the SFT map first in case we need to use it in the constructors below.
        // The first call initializes the SFT map. Other calls block until the SFT map is initialized.
        crate::policy::sft_map::SFTRefStorage::pre_use_check();
        SFT_MAP.initialize_once(&create_sft_map);

        let num_workers = if cfg!(feature = "single_worker") {
            1
        } else {
            *options.threads
        };

        let scheduler = GCWorkScheduler::new(num_workers, (*options.thread_affinity).clone());

        let state = Arc::new(GlobalState::default());

        let gc_trigger = Arc::new(GCTrigger::new(
            options.clone(),
            scheduler.clone(),
            state.clone(),
        ));

        let stats = Arc::new(Stats::new(&options));

        // We need this while creating spaces, but we do not use it once the MMTk instance is created.
        // So we do not save it in MMTK. This may change in the future.
        let mut heap = HeapMeta::new();

        let mut plan = crate::plan::create_plan(
            *options.plan,
            CreateGeneralPlanArgs {
                vm_map: VM_MAP.as_ref(),
                mmapper: MMAPPER.as_ref(),
                options: options.clone(),
                state: state.clone(),
                gc_trigger: gc_trigger.clone(),
                scheduler: scheduler.clone(),
                stats: &stats,
                heap: &mut heap,
            },
        );

        // We haven't finished creating MMTk, so no one is using the GC trigger yet. We cast the Arc into a mutable reference.
        {
            // TODO: use Arc::get_mut_unchecked() when it is available.
            let gc_trigger: &mut GCTrigger<VM> =
                unsafe { &mut *(Arc::as_ptr(&gc_trigger) as *mut _) };
            // We know the plan address will not change. Cast it to a static reference.
            let static_plan: &'static dyn Plan<VM = VM> = unsafe { &*(&*plan as *const _) };
            // Set the plan so that the GC trigger can trigger GC and check GC conditions.
            gc_trigger.set_plan(static_plan);
        }

        // TODO: This probably does not work if we have multiple MMTk instances.
        // This needs to be called after we create the Plan. It needs to use HeapMeta, which is gradually built while we create spaces.
        VM_MAP.finalize_static_space_map(
            heap.get_discontig_start(),
            heap.get_discontig_end(),
            &mut |start_address| {
                plan.for_each_space_mut(&mut |space| {
                    // If the `VMMap` has a discontiguous memory range, we notify all discontiguous
                    // spaces that the starting address has been determined.
                    if let Some(pr) = space.maybe_get_page_resource_mut() {
                        pr.update_discontiguous_start(start_address);
                    }
                })
            },
        );

        MMTK {
            options,
            state,
            plan: UnsafeCell::new(plan),
            reference_processors: ReferenceProcessors::new(),
            finalizable_processor: Mutex::new(FinalizableProcessor::<
                <VM::VMReferenceGlue as ReferenceGlue<VM>>::FinalizableType,
            >::new()),
            scheduler,
            #[cfg(feature = "sanity")]
            sanity_checker: Mutex::new(SanityChecker::new()),
            #[cfg(feature = "sanity")]
            inside_sanity: AtomicBool::new(false),
            #[cfg(feature = "extreme_assertions")]
            slot_logger: SlotLogger::new(),
            #[cfg(feature = "analysis")]
            analysis_manager: Arc::new(AnalysisManager::new(stats.clone())),
            gc_trigger,
            stats,
        }
    }

    /// Initialize the GC worker threads that are required for doing garbage collections.
    /// This is a mandatory call for a VM during its boot process once its thread system
    /// is ready.
    ///
    /// Internally, this function will invoke [`Collection::spawn_gc_thread()`] to spawn GC worker
    /// threads.
    ///
    /// # Arguments
    ///
    /// *   `tls`: The thread that wants to enable the collection. This value will be passed back
    ///     to the VM in [`Collection::spawn_gc_thread()`] so that the VM knows the context.
    ///
    /// [`Collection::spawn_gc_thread()`]: crate::vm::Collection::spawn_gc_thread()
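    ///
    /// A sketch of the expected boot sequence (assuming the binding keeps the
    /// instance in `'static` storage, e.g. via `Box::leak`, and `vm_thread` is
    /// the VM's current thread handle):
    ///
    /// ```ignore
    /// let mmtk: &'static MMTK<MyVM> = Box::leak(Box::new(builder.build()));
    /// // Once the VM's thread system is ready:
    /// mmtk.initialize_collection(vm_thread);
    /// ```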
    pub fn initialize_collection(&'static self, tls: VMThread) {
        assert!(
            !self.state.is_initialized(),
            "MMTk collection has been initialized (was initialize_collection() already called before?)"
        );
        self.scheduler.spawn_gc_threads(self, tls);
        self.state.initialized.store(true, Ordering::SeqCst);
        probe!(mmtk, collection_initialized);
    }

    /// Prepare an MMTk instance for calling the `fork()` system call.
    ///
    /// The `fork()` system call is available on Linux and some UNIX variants, and may be emulated
    /// on other platforms by libraries such as Cygwin.  The properties of the `fork()` system call
    /// require the users to do some preparation before calling it.
    ///
    /// -   **Multi-threading**:  If `fork()` is called when the process has multiple threads, it
    ///     will only duplicate the current thread into the child process, and the child process can
    ///     only call async-signal-safe functions, notably `exec()`.  For VMs that use
    ///     multi-process concurrency, it is imperative that only one thread exists in the process
    ///     when calling `fork()`.
    ///
    /// -   **File descriptors**: The child process inherits copies of the parent's set of open
    ///     file descriptors.  This may or may not be desired depending on use cases.
    ///
    /// This function helps VMs that use `fork()` for multi-process concurrency.  It instructs all
    /// GC threads to save their contexts and return from their entry-point functions.  Currently,
    /// such threads only include GC workers, and the entry point is
    /// [`crate::memory_manager::start_worker`].  A subsequent call to `MMTK::after_fork()` will
    /// re-spawn the threads using their saved contexts.  The VM must not allocate objects in the
    /// MMTk heap before calling `MMTK::after_fork()`.
    ///
    /// TODO: Currently, the MMTk core does not keep any files open for a long time.  In the
    /// future, this function and the `after_fork` function may be used for handling open file
    /// descriptors across invocations of `fork()`.  One possible use case is logging GC activities
    /// and statistics to files, such as performing heap dumps across multiple GCs.
    ///
    /// If a VM intends to execute another program by calling `fork()` and immediately calling
    /// `exec`, it may skip this function because the state of the MMTk instance will be irrelevant
    /// in that case.
    ///
    /// # Caution!
    ///
    /// This function sends an asynchronous message to GC threads and returns immediately, but it
    /// is only safe for the VM to call `fork()` after the underlying **native threads** of the GC
    /// threads have exited.  After calling this function, the VM should wait for their underlying
    /// native threads to exit in a VM-specific manner before calling `fork()`.
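    ///
    /// A sketch of the intended protocol (`wait_for_gc_threads_to_exit` is a
    /// hypothetical VM-specific step, not an MMTk API):
    ///
    /// ```ignore
    /// mmtk.prepare_to_fork();          // ask GC threads to save context and exit
    /// wait_for_gc_threads_to_exit();   // VM-specific: join the native GC threads
    /// let pid = unsafe { libc::fork() };
    /// // Whichever process continues to use this MMTk instance must call
    /// // `after_fork` to re-spawn GC threads before allocating again:
    /// mmtk.after_fork(vm_thread);
    /// ```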
    pub fn prepare_to_fork(&'static self) {
        assert!(
            self.state.is_initialized(),
            "MMTk collection has not been initialized yet (was initialize_collection() called before?)"
        );
        probe!(mmtk, prepare_to_fork);
        self.scheduler.stop_gc_threads_for_forking();
    }

    /// Call this function after the VM called the `fork()` system call.
    ///
    /// This function will re-spawn MMTk threads from saved contexts.
    ///
    /// # Arguments
    ///
    /// *   `tls`: The thread that wants to respawn MMTk threads after forking. This value will be
    ///     passed back to the VM in `Collection::spawn_gc_thread()` so that the VM knows the
    ///     context.
    pub fn after_fork(&'static self, tls: VMThread) {
        assert!(
            self.state.is_initialized(),
            "MMTk collection has not been initialized yet (was initialize_collection() called before?)"
        );
        probe!(mmtk, after_fork);
        self.scheduler.respawn_gc_threads_after_forking(tls);
    }

    /// Generic hook to allow benchmarks to be harnessed. MMTk will trigger a GC
    /// to clear any residual garbage and start collecting statistics for the benchmark.
    /// This is usually called by the benchmark harness as its last step before the actual benchmark.
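    ///
    /// Typical harness usage (a sketch; `run_benchmark` is a hypothetical workload
    /// and `mutator_tls` is the requesting mutator's thread handle):
    ///
    /// ```ignore
    /// mmtk.harness_begin(mutator_tls); // triggers a full GC, then starts statistics
    /// run_benchmark();
    /// mmtk.harness_end();              // stops and prints the collected statistics
    /// ```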
    pub fn harness_begin(&self, tls: VMMutatorThread) {
        probe!(mmtk, harness_begin);
        self.handle_user_collection_request(tls, true, true);
        self.state.inside_harness.store(true, Ordering::SeqCst);
        self.stats.start_all();
        self.scheduler.enable_stat();
    }

    /// Generic hook to allow benchmarks to be harnessed. MMTk will stop collecting
    /// statistics, and print out the collected statistics in a defined format.
    /// This is usually called by the benchmark harness right after the actual benchmark.
    pub fn harness_end(&'static self) {
        self.stats.stop_all(self);
        self.state.inside_harness.store(false, Ordering::SeqCst);
        probe!(mmtk, harness_end);
    }

    #[cfg(feature = "sanity")]
    pub(crate) fn sanity_begin(&self) {
        self.inside_sanity.store(true, Ordering::Relaxed)
    }

    #[cfg(feature = "sanity")]
    pub(crate) fn sanity_end(&self) {
        self.inside_sanity.store(false, Ordering::Relaxed)
    }

    #[cfg(feature = "sanity")]
    pub(crate) fn is_in_sanity(&self) -> bool {
        self.inside_sanity.load(Ordering::Relaxed)
    }

    pub(crate) fn set_gc_status(&self, s: GcStatus) {
        let mut gc_status = self.state.gc_status.lock().unwrap();
        if *gc_status == GcStatus::NotInGC {
            self.state.stacks_prepared.store(false, Ordering::SeqCst);
            // FIXME stats
            self.stats.start_gc();
        }
        *gc_status = s;
        if *gc_status == GcStatus::NotInGC {
            // FIXME stats
            if self.stats.get_gathering_stats() {
                self.stats.end_gc();
            }
        }
    }

    /// Return true if a collection is in progress.
    pub fn gc_in_progress(&self) -> bool {
        *self.state.gc_status.lock().unwrap() != GcStatus::NotInGC
    }

    /// Return true if a collection is in progress and past the preparatory stage.
    pub fn gc_in_progress_proper(&self) -> bool {
        *self.state.gc_status.lock().unwrap() == GcStatus::GcProper
    }

    /// Return true if the current GC is an emergency GC.
    ///
    /// An emergency GC happens when a normal GC cannot reclaim enough memory to satisfy allocation
    /// requests.  Plans may do full-heap GC, defragmentation, etc. during emergency GCs in order to
    /// free up more memory.
    ///
    /// VM bindings can call this function during GC to check if the current GC is an emergency GC.
    /// If it is, the VM binding is recommended to retain fewer objects than in normal GCs, to the
    /// extent allowed by the specification of the VM or the language.  For example, the VM binding
    /// may choose not to retain objects used for caching.  Specifically, for Java virtual machines,
    /// that means not retaining referents of [`SoftReference`][java-soft-ref], which is primarily
    /// designed for implementing memory-sensitive caches.
    ///
    /// [java-soft-ref]: https://docs.oracle.com/en/java/javase/21/docs/api/java.base/java/lang/ref/SoftReference.html
    pub fn is_emergency_collection(&self) -> bool {
        self.state.is_emergency_collection()
    }

    /// Return true if the current GC was triggered manually by the user/binding.
    pub fn is_user_triggered_collection(&self) -> bool {
        self.state.is_user_triggered_collection()
    }

    /// The application code has requested a collection. This is just a GC hint, and
    /// we may ignore it.
    ///
    /// Returns whether a GC ran or not. If MMTk triggers a GC, this method will block the
    /// calling thread and return true when the GC finishes. Otherwise, this method returns
    /// false immediately.
    ///
    /// # Arguments
    /// * `tls`: The mutator thread that requests the GC
    /// * `force`: The request cannot be ignored (except for NoGC)
    /// * `exhaustive`: The requested GC should be exhaustive. This is also a hint.
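    ///
    /// For example, a binding implementing something like Java's `System.gc()`
    /// might issue a forced, exhaustive request (a sketch):
    ///
    /// ```ignore
    /// // Blocks the calling mutator until the GC finishes (unless the plan is NoGC).
    /// let did_gc = mmtk.handle_user_collection_request(mutator_tls, true, true);
    /// ```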
    pub fn handle_user_collection_request(
        &self,
        tls: VMMutatorThread,
        force: bool,
        exhaustive: bool,
    ) -> bool {
        if self
            .gc_trigger
            .handle_user_collection_request(force, exhaustive)
        {
            use crate::vm::Collection;
            VM::VMCollection::block_for_gc(tls);
            true
        } else {
            false
        }
    }

    /// MMTK has requested stop-the-world activity (e.g., a STW phase within a concurrent GC).
    #[allow(unused)]
    pub fn trigger_internal_collection_request(&self) {
        self.gc_trigger.trigger_internal_collection_request();
    }

    /// Get a reference to the plan.
    pub fn get_plan(&self) -> &dyn Plan<VM = VM> {
        unsafe { &**(self.plan.get()) }
    }

    /// Get the plan as a mutable reference.
    ///
    /// # Safety
    ///
    /// This is unsafe because the caller must ensure that the plan is not used by other threads.
    #[allow(clippy::mut_from_ref)]
    pub unsafe fn get_plan_mut(&self) -> &mut dyn Plan<VM = VM> {
        &mut **(self.plan.get())
    }

    /// Get the run-time options.
    pub fn get_options(&self) -> &Options {
        &self.options
    }

    /// Enumerate objects in all spaces in this MMTK instance.
    ///
    /// The call-back function `f` is called for every object that has the valid object bit (VO
    /// bit) set, i.e. every object that has been allocated in the heap of this MMTK instance
    /// but has not been reclaimed yet.
    ///
    /// # Notes about object initialization and finalization
    ///
    /// When this function visits an object, it only guarantees that its VO bit has been set.
    /// It does not guarantee that the object has been "fully initialized" in the sense of the
    /// programming language the VM is implementing.  For example, the object header and the type
    /// information may not have been written.
    ///
    /// It will also visit objects that have been "finalized" in the sense of the programming
    /// language the VM is implementing, as long as the object has not been reclaimed by the GC
    /// yet.  Be careful.  If the object header is destroyed, it may not be safe to access such
    /// objects in the high-level language.
    ///
    /// # Interaction with allocation and GC
    ///
    /// This function does not mutate the heap.  It is safe if multiple threads execute this
    /// function concurrently during mutator time.
    ///
    /// It has *undefined behavior* if allocation or GC happens while this function is being
    /// executed.  The VM binding must ensure no threads are allocating and GC does not start while
    /// executing this function.  One way to do this is stopping all mutators before calling this
    /// function.
    ///
    /// Some high-level languages may provide an API that allows the user to allocate objects and
    /// trigger GC while enumerating objects.  One example is [`ObjectSpace::each_object`][os_eo] in
    /// Ruby.  The VM binding may use the callback of this function to save all visited object
    /// references and let the user visit those references after this function returns.  Make sure
    /// those saved references are in the root set or in an object that will live through GCs before
    /// the high-level language finishes visiting the saved object references.
    ///
    /// [os_eo]: https://docs.ruby-lang.org/en/master/ObjectSpace.html#method-c-each_object
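    ///
    /// For example, counting all live (VO-bit-set) objects might look like this
    /// (a sketch; the binding must ensure mutators are stopped while it runs):
    ///
    /// ```ignore
    /// let mut count = 0usize;
    /// mmtk.enumerate_objects(|_object| {
    ///     count += 1;
    /// });
    /// println!("{} objects carry the VO bit", count);
    /// ```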
    #[cfg(feature = "vo_bit")]
    pub fn enumerate_objects<F>(&self, f: F)
    where
        F: FnMut(ObjectReference),
    {
        use crate::util::object_enum;

        let mut enumerator = object_enum::ClosureObjectEnumerator::<_, VM>::new(f);
        let plan = self.get_plan();
        plan.for_each_space(&mut |space| {
            space.enumerate_objects(&mut enumerator);
        })
    }

    /// Aggregate the live bytes per space (an array indexed by space index) with the
    /// space stats to produce a map of live bytes stats for the spaces.
    pub(crate) fn aggregate_live_bytes_in_last_gc(
        &self,
        live_bytes_per_space: [usize; MAX_SPACES],
    ) -> HashMap<&'static str, crate::LiveBytesStats> {
        use crate::policy::space::Space;
        let mut ret = HashMap::new();
        self.get_plan().for_each_space(&mut |space: &dyn Space<VM>| {
            let space_name = space.get_name();
            let space_idx = space.get_descriptor().get_index();
            let used_pages = space.reserved_pages();
            if used_pages != 0 {
                let used_bytes = crate::util::conversions::pages_to_bytes(used_pages);
                let live_bytes = live_bytes_per_space[space_idx];
                debug_assert!(
                    live_bytes <= used_bytes,
                    "Live bytes of objects in {} ({} bytes) are larger than used pages ({} bytes), something is wrong.",
                    space_name, live_bytes, used_bytes
                );
                ret.insert(space_name, crate::LiveBytesStats {
                    live_bytes,
                    used_pages,
                    used_bytes,
                });
            }
        });
        ret
    }

    /// Print VM maps.  It will print the memory ranges used by spaces as well as some attributes of
    /// the spaces.
    ///
    /// -   "I": The space is immortal.  Its objects will never die.
    /// -   "N": The space is non-movable.  Its objects will never move.
    ///
    /// Arguments:
    /// *   `out`: the place to print the VM maps.
    /// *   `space_name`: If `None`, print all spaces;
    ///     if `Some(n)`, only print the space whose name is `n`.
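    ///
    /// For example, to collect the maps of all spaces into a `String` (a sketch;
    /// `String` implements `std::fmt::Write`):
    ///
    /// ```ignore
    /// let mut out = String::new();
    /// mmtk.debug_print_vm_maps(&mut out, None).unwrap();
    /// print!("{}", out);
    /// ```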
    pub fn debug_print_vm_maps(
        &self,
        out: &mut impl std::fmt::Write,
        space_name: Option<&str>,
    ) -> Result<(), std::fmt::Error> {
        let mut result_so_far = Ok(());
        self.get_plan().for_each_space(&mut |space| {
            if result_so_far.is_ok()
                && (space_name.is_none() || space_name == Some(space.get_name()))
            {
                result_so_far = crate::policy::space::print_vm_map(space, out);
            }
        });
        result_so_far
    }

    /// Initialize object metadata for a VM space object.
    /// Objects in the VM space are allocated/managed by the binding. This function provides a way for
    /// the binding to set object metadata in MMTk for an object in the space.
    #[cfg(feature = "vm_space")]
    pub fn initialize_vm_space_object(&self, object: crate::util::ObjectReference) {
        use crate::policy::sft::SFT;
        self.get_plan()
            .base()
            .vm_space
            .initialize_object_metadata(object)
    }
}

/// A non-mangled function to print object information for debugging purposes. This function can be directly
/// called from a debugger.
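/// Because the symbol is not mangled, a debugger can usually evaluate a call to it
/// directly, e.g. `call mmtk_debug_print_object(obj)` in GDB (illustrative; the
/// exact syntax depends on the debugger and how `obj` is obtained).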
#[no_mangle]
pub fn mmtk_debug_print_object(object: crate::util::ObjectReference) {
    // If the address is unmapped, we cannot access its metadata. Just quit.
    if !object.to_raw_address().is_mapped() {
        println!("{} is not mapped in MMTk", object);
        return;
    }

    // If the address is not aligned to the object reference size, it is not an object reference.
    if !object
        .to_raw_address()
        .is_aligned_to(crate::util::ObjectReference::ALIGNMENT)
    {
        println!(
            "{} is not properly aligned. It is not an object reference.",
            object
        );
        return;
    }

    // Forward to the space.
    let sft = SFT_MAP.get_checked(object.to_raw_address());
    // Print the space name.
    println!("In {}:", sft.name());
    // Print object information.
    sft.debug_print_object_info(object);
}