// mmtk/mmtk.rs
1//! MMTk instance.
2use crate::global_state::{GcStatus, GlobalState};
3use crate::plan::CreateGeneralPlanArgs;
4use crate::plan::Plan;
5use crate::policy::sft_map::{create_sft_map, SFTMap};
6use crate::scheduler::GCWorkScheduler;
7
8#[cfg(feature = "vo_bit")]
9use crate::util::address::ObjectReference;
10#[cfg(feature = "analysis")]
11use crate::util::analysis::AnalysisManager;
12use crate::util::finalizable_processor::FinalizableProcessor;
13use crate::util::heap::gc_trigger::GCTrigger;
14use crate::util::heap::layout::heap_parameters::MAX_SPACES;
15use crate::util::heap::layout::vm_layout::{vm_layout, VMLayout};
16use crate::util::heap::layout::{self, Mmapper, VMMap};
17use crate::util::heap::HeapMeta;
18use crate::util::opaque_pointer::*;
19use crate::util::options::Options;
20use crate::util::reference_processor::ReferenceProcessors;
21#[cfg(feature = "sanity")]
22use crate::util::sanity::sanity_checker::SanityChecker;
23#[cfg(feature = "extreme_assertions")]
24use crate::util::slot_logger::SlotLogger;
25use crate::util::statistics::stats::Stats;
26#[cfg(feature = "vm_space")]
27use crate::vm::object_model::ObjectModel;
28use crate::vm::ReferenceGlue;
29use crate::vm::VMBinding;
30use std::cell::UnsafeCell;
31use std::collections::HashMap;
32use std::default::Default;
33#[cfg(feature = "sanity")]
34use std::sync::atomic::AtomicBool;
35use std::sync::atomic::Ordering;
36use std::sync::Arc;
37use std::sync::Mutex;
38
lazy_static! {
    // I am not sure if we should include these mmappers as part of MMTk struct.
    // The considerations are:
    // 1. We need VMMap and Mmapper to create spaces. It is natural that the mappers are not
    //    part of MMTK, as creating MMTK requires these mappers. We could use Rc/Arc for these
    //    mappers though.
    // 2. These mmappers are possibly global across multiple MMTk instances, as they manage the
    //    entire address space.
    // TODO: We should refactor this when we know more about how multiple MMTK instances work.

    /// A global VMMap that manages the mapping of spaces to virtual memory ranges.
    pub static ref VM_MAP: Box<dyn VMMap + Send + Sync> = layout::create_vm_map();

    /// A global Mmapper for mmaping and protection of virtual memory.
    pub static ref MMAPPER: Box<dyn Mmapper> = layout::create_mmapper();
}
54
use crate::util::rust_util::InitializeOnce;

/// A global space function table (SFT) that allows efficient dispatch of space-specific code for
/// addresses in our heap. It is initialized once in `MMTK::new` before any space is created.
pub static SFT_MAP: InitializeOnce<Box<dyn SFTMap>> = InitializeOnce::new();
59
/// MMTk builder. This is used to set options and other settings before actually creating an MMTk
/// instance. Create a builder, set options via [`MMTKBuilder::set_option`] (or in bulk via
/// [`MMTKBuilder::set_options_bulk_by_str`]), then call [`MMTKBuilder::build`] to obtain an
/// [`MMTK`] instance.
pub struct MMTKBuilder {
    /// The options for this instance.
    pub options: Options,
}
65
66impl MMTKBuilder {
67 /// Create an MMTK builder with options read from environment variables, or using built-in
68 /// default if not overridden by environment variables.
69 pub fn new() -> Self {
70 let mut builder = Self::new_no_env_vars();
71 builder.options.read_env_var_settings();
72 builder
73 }
74
75 /// Create an MMTK builder with build-in default options, but without reading options from
76 /// environment variables.
77 pub fn new_no_env_vars() -> Self {
78 MMTKBuilder {
79 options: Options::default(),
80 }
81 }
82
83 /// Set an option.
84 pub fn set_option(&mut self, name: &str, val: &str) -> bool {
85 self.options.set_from_string(name, val)
86 }
87
88 /// Set multiple options by a string. The string should be key-value pairs separated by white spaces,
89 /// such as `threads=1 stress_factor=4096`.
90 pub fn set_options_bulk_by_str(&mut self, options: &str) -> bool {
91 self.options.set_bulk_from_string(options)
92 }
93
94 /// Custom VM layout constants. VM bindings may use this function for compressed or 39-bit heap support.
95 /// This function must be called before MMTk::new()
96 pub fn set_vm_layout(&mut self, constants: VMLayout) {
97 VMLayout::set_custom_vm_layout(constants)
98 }
99
100 /// Build an MMTk instance from the builder.
101 pub fn build<VM: VMBinding>(&self) -> MMTK<VM> {
102 MMTK::new(Arc::new(self.options.clone()))
103 }
104}
105
106impl Default for MMTKBuilder {
107 fn default() -> Self {
108 Self::new()
109 }
110}
111
/// An MMTk instance. MMTk allows multiple instances to run independently, and each instance gives users a separate heap.
/// *Note that multi-instances is not fully supported yet*
pub struct MMTK<VM: VMBinding> {
    /// Run-time options for this instance.
    pub(crate) options: Arc<Options>,
    /// Global state shared between mutator and GC threads (GC status, initialization flags, etc.).
    pub(crate) state: Arc<GlobalState>,
    /// The plan (GC algorithm). Wrapped in `UnsafeCell` so `get_plan_mut` can hand out a mutable
    /// reference; callers of that method must uphold its safety contract.
    pub(crate) plan: UnsafeCell<Box<dyn Plan<VM = VM>>>,
    /// Processors for the VM's reference types (see `ReferenceProcessors`).
    pub(crate) reference_processors: ReferenceProcessors,
    /// Processor for the binding's finalizable objects.
    pub(crate) finalizable_processor:
        Mutex<FinalizableProcessor<<VM::VMReferenceGlue as ReferenceGlue<VM>>::FinalizableType>>,
    /// The GC work scheduler that owns the GC worker threads.
    pub(crate) scheduler: Arc<GCWorkScheduler<VM>>,
    #[cfg(feature = "sanity")]
    pub(crate) sanity_checker: Mutex<SanityChecker<VM::VMSlot>>,
    #[cfg(feature = "extreme_assertions")]
    pub(crate) slot_logger: SlotLogger<VM::VMSlot>,
    /// Decides when GCs are triggered.
    pub(crate) gc_trigger: Arc<GCTrigger<VM>>,
    /// Statistics counters for this instance.
    pub(crate) stats: Arc<Stats>,
    /// Whether a sanity GC is currently in progress (see `sanity_begin`/`sanity_end`).
    #[cfg(feature = "sanity")]
    inside_sanity: AtomicBool,
    /// Analysis counters. The feature analysis allows us to periodically stop the world and collect some statistics.
    #[cfg(feature = "analysis")]
    pub(crate) analysis_manager: Arc<AnalysisManager<VM>>,
}
134
// SAFETY: `MMTK` is shared between mutator and GC threads, but the `UnsafeCell` around the plan
// prevents `Sync`/`Send` from being derived automatically. Mutable access to the plan is only
// handed out via `get_plan_mut`, whose safety contract requires the caller to ensure the plan is
// not used by other threads at the same time.
unsafe impl<VM: VMBinding> Sync for MMTK<VM> {}
unsafe impl<VM: VMBinding> Send for MMTK<VM> {}
137
138impl<VM: VMBinding> MMTK<VM> {
    /// Create an MMTK instance. This is not public. Bindings should use [`MMTKBuilder::build`].
    ///
    /// This sets up, in order: the SFT map, the GC scheduler, global state, the GC trigger,
    /// statistics, side metadata, and finally the plan and its spaces.
    pub(crate) fn new(options: Arc<Options>) -> Self {
        // Verify the Mmapper can handle the required address space size.
        vm_layout().validate_address_space();

        // Initialize SFT first in case we need to use this in the constructor.
        // The first call will initialize SFT map. Other calls will be blocked until SFT map is initialized.
        crate::policy::sft_map::SFTRefStorage::pre_use_check();
        SFT_MAP.initialize_once(&create_sft_map);

        // With the "single_worker" feature we always use exactly one GC worker thread,
        // regardless of the `threads` option.
        let num_workers = if cfg!(feature = "single_worker") {
            1
        } else {
            *options.threads
        };

        let scheduler = GCWorkScheduler::new(num_workers, (*options.thread_affinity).clone());

        let state = Arc::new(GlobalState::default());

        let gc_trigger = Arc::new(GCTrigger::new(
            options.clone(),
            scheduler.clone(),
            state.clone(),
        ));

        let stats = Arc::new(Stats::new(&options));

        // Initialize side metadata runtime state and reserve its address range before creating
        // spaces. Plan/space initialization may map side metadata during setup.
        crate::util::metadata::side_metadata::initialize_side_metadata::<VM>(&options);

        // We need this during creating spaces, but we do not use this once the MMTk instance is created.
        // So we do not save it in MMTK. This may change in the future.
        let mut heap = HeapMeta::new();

        let mut plan = crate::plan::create_plan(
            *options.plan,
            CreateGeneralPlanArgs {
                vm_map: VM_MAP.as_ref(),
                mmapper: MMAPPER.as_ref(),
                options: options.clone(),
                state: state.clone(),
                gc_trigger: gc_trigger.clone(),
                scheduler: scheduler.clone(),
                stats: &stats,
                heap: &mut heap,
            },
        );

        // We haven't finished creating MMTk. No one is using the GC trigger. We cast the arc into a mutable reference.
        {
            // TODO: use Arc::get_mut_unchecked() when it is available.
            // SAFETY: `gc_trigger` has not been shared with any other thread yet, so we
            // effectively have exclusive access despite going through the `Arc`.
            let gc_trigger: &mut GCTrigger<VM> =
                unsafe { &mut *(Arc::as_ptr(&gc_trigger) as *mut _) };
            // We know the plan address will not change. Cast it to a static reference.
            // SAFETY: the boxed plan is moved into the returned `MMTK` below and its heap
            // allocation address does not change, so the reference stays valid.
            let static_plan: &'static dyn Plan<VM = VM> = unsafe { &*(&*plan as *const _) };
            // Set the plan so we can trigger GC and check GC condition without using plan
            gc_trigger.set_plan(static_plan);
        }

        // TODO: This probably does not work if we have multiple MMTk instances.
        // This needs to be called after we create Plan. It needs to use HeapMeta, which is gradually built when we create spaces.
        VM_MAP.finalize_static_space_map(
            heap.get_discontig_start(),
            heap.get_discontig_end(),
            &mut |start_address| {
                plan.for_each_space_mut(&mut |space| {
                    // If the `VMMap` has a discontiguous memory range, we notify all discontiguous
                    // space that the starting address has been determined.
                    if let Some(pr) = space.maybe_get_page_resource_mut() {
                        pr.update_discontiguous_start(start_address);
                    }
                })
            },
        );

        // The order here is important:
        // Initialize side metadata sanity first
        plan.verify_side_metadata_sanity();
        // Then initialize SFT because it may use side metadata
        plan.initialize_sft();

        MMTK {
            options,
            state,
            plan: UnsafeCell::new(plan),
            reference_processors: ReferenceProcessors::new(),
            finalizable_processor: Mutex::new(FinalizableProcessor::<
                <VM::VMReferenceGlue as ReferenceGlue<VM>>::FinalizableType,
            >::new()),
            scheduler,
            #[cfg(feature = "sanity")]
            sanity_checker: Mutex::new(SanityChecker::new()),
            #[cfg(feature = "sanity")]
            inside_sanity: AtomicBool::new(false),
            #[cfg(feature = "extreme_assertions")]
            slot_logger: SlotLogger::new(),
            #[cfg(feature = "analysis")]
            analysis_manager: Arc::new(AnalysisManager::new(stats.clone())),
            gc_trigger,
            stats,
        }
    }
243
244 /// Initialize the GC worker threads that are required for doing garbage collections.
245 /// This is a mandatory call for a VM during its boot process once its thread system
246 /// is ready.
247 ///
248 /// Internally, this function will invoke [`Collection::spawn_gc_thread()`] to spawn GC worker
249 /// threads.
250 ///
251 /// # Arguments
252 ///
253 /// * `tls`: The thread that wants to enable the collection. This value will be passed back
254 /// to the VM in [`Collection::spawn_gc_thread()`] so that the VM knows the context.
255 ///
256 /// [`Collection::spawn_gc_thread()`]: crate::vm::Collection::spawn_gc_thread()
257 pub fn initialize_collection(&'static self, tls: VMThread) {
258 assert!(
259 !self.state.is_initialized(),
260 "MMTk collection has been initialized (was initialize_collection() already called before?)"
261 );
262 self.scheduler.spawn_gc_threads(self, tls);
263 self.state.initialized.store(true, Ordering::SeqCst);
264 probe!(mmtk, collection_initialized);
265 }
266
267 /// Prepare an MMTk instance for calling the `fork()` system call.
268 ///
269 /// The `fork()` system call is available on Linux and some UNIX variants, and may be emulated
270 /// on other platforms by libraries such as Cygwin. The properties of the `fork()` system call
271 /// requires the users to do some preparation before calling it.
272 ///
273 /// - **Multi-threading**: If `fork()` is called when the process has multiple threads, it
274 /// will only duplicate the current thread into the child process, and the child process can
275 /// only call async-signal-safe functions, notably `exec()`. For VMs that that use
276 /// multi-process concurrency, it is imperative that when calling `fork()`, only one thread may
277 /// exist in the process.
278 ///
279 /// - **File descriptors**: The child process inherits copies of the parent's set of open
280 /// file descriptors. This may or may not be desired depending on use cases.
281 ///
282 /// This function helps VMs that use `fork()` for multi-process concurrency. It instructs all
283 /// GC threads to save their contexts and return from their entry-point functions. Currently,
284 /// such threads only include GC workers, and the entry point is
285 /// [`crate::memory_manager::start_worker`]. A subsequent call to `MMTK::after_fork()` will
286 /// re-spawn the threads using their saved contexts. The VM must not allocate objects in the
287 /// MMTk heap before calling `MMTK::after_fork()`.
288 ///
289 /// TODO: Currently, the MMTk core does not keep any files open for a long time. In the
290 /// future, this function and the `after_fork` function may be used for handling open file
291 /// descriptors across invocations of `fork()`. One possible use case is logging GC activities
292 /// and statistics to files, such as performing heap dumps across multiple GCs.
293 ///
294 /// If a VM intends to execute another program by calling `fork()` and immediately calling
295 /// `exec`, it may skip this function because the state of the MMTk instance will be irrelevant
296 /// in that case.
297 ///
298 /// # Caution!
299 ///
300 /// This function sends an asynchronous message to GC threads and returns immediately, but it
301 /// is only safe for the VM to call `fork()` after the underlying **native threads** of the GC
302 /// threads have exited. After calling this function, the VM should wait for their underlying
303 /// native threads to exit in VM-specific manner before calling `fork()`.
304 pub fn prepare_to_fork(&'static self) {
305 assert!(
306 self.state.is_initialized(),
307 "MMTk collection has not been initialized, yet (was initialize_collection() called before?)"
308 );
309 probe!(mmtk, prepare_to_fork);
310 self.scheduler.stop_gc_threads_for_forking();
311 }
312
313 /// Call this function after the VM called the `fork()` system call.
314 ///
315 /// This function will re-spawn MMTk threads from saved contexts.
316 ///
317 /// # Arguments
318 ///
319 /// * `tls`: The thread that wants to respawn MMTk threads after forking. This value will be
320 /// passed back to the VM in `Collection::spawn_gc_thread()` so that the VM knows the
321 /// context.
322 pub fn after_fork(&'static self, tls: VMThread) {
323 assert!(
324 self.state.is_initialized(),
325 "MMTk collection has not been initialized, yet (was initialize_collection() called before?)"
326 );
327 probe!(mmtk, after_fork);
328 self.scheduler.respawn_gc_threads_after_forking(tls);
329 }
330
    /// Generic hook to allow benchmarks to be harnessed. MMTk will trigger a GC
    /// to clear any residual garbage and start collecting statistics for the benchmark.
    /// This is usually called by the benchmark harness as its last step before the actual benchmark.
    pub fn harness_begin(&self, tls: VMMutatorThread) {
        probe!(mmtk, harness_begin);
        // Request a forced, exhaustive GC to clear residual garbage before measurement starts.
        self.handle_user_collection_request(tls, true, true);
        self.state.inside_harness.store(true, Ordering::SeqCst);
        // Start the statistics counters and enable statistics gathering in the scheduler.
        self.stats.start_all();
        self.scheduler.enable_stat();
    }
341
    /// Generic hook to allow benchmarks to be harnessed. MMTk will stop collecting
    /// statistics, and print out the collected statistics in a defined format.
    /// This is usually called by the benchmark harness right after the actual benchmark.
    pub fn harness_end(&'static self) {
        // Stop all counters and report the gathered statistics.
        self.stats.stop_all(self);
        self.state.inside_harness.store(false, Ordering::SeqCst);
        probe!(mmtk, harness_end);
    }
350
    /// Mark the start of a sanity GC (only available with the "sanity" feature).
    #[cfg(feature = "sanity")]
    pub(crate) fn sanity_begin(&self) {
        self.inside_sanity.store(true, Ordering::Relaxed)
    }
355
    /// Mark the end of a sanity GC (only available with the "sanity" feature).
    #[cfg(feature = "sanity")]
    pub(crate) fn sanity_end(&self) {
        self.inside_sanity.store(false, Ordering::Relaxed)
    }
360
    /// Return whether a sanity GC is currently in progress
    /// (i.e. between `sanity_begin` and `sanity_end`).
    #[cfg(feature = "sanity")]
    pub(crate) fn is_in_sanity(&self) -> bool {
        self.inside_sanity.load(Ordering::Relaxed)
    }
365
    /// Update the global GC status, starting/stopping GC statistics on the transitions.
    /// Leaving `NotInGC` starts GC stats; arriving at `NotInGC` ends them.
    pub(crate) fn set_gc_status(&self, s: GcStatus) {
        let mut gc_status = self.state.gc_status.lock().unwrap();
        if *gc_status == GcStatus::NotInGC {
            // A GC is starting: stacks are not prepared yet for this GC.
            self.state.stacks_prepared.store(false, Ordering::SeqCst);
            // FIXME stats
            self.stats.start_gc();
        }
        *gc_status = s;
        if *gc_status == GcStatus::NotInGC {
            // The GC has finished.
            // FIXME stats
            if self.stats.get_gathering_stats() {
                self.stats.end_gc();
            }
        }
    }
381
382 /// Return true if a collection is in progress.
383 pub fn gc_in_progress(&self) -> bool {
384 *self.state.gc_status.lock().unwrap() != GcStatus::NotInGC
385 }
386
387 /// Return true if a collection is in progress and past the preparatory stage.
388 pub fn gc_in_progress_proper(&self) -> bool {
389 *self.state.gc_status.lock().unwrap() == GcStatus::GcProper
390 }
391
    /// Return true if the current GC is an emergency GC.
    ///
    /// An emergency GC happens when a normal GC cannot reclaim enough memory to satisfy allocation
    /// requests. Plans may do full-heap GC, defragmentation, etc. during emergency GCs in order to
    /// free up more memory.
    ///
    /// VM bindings can call this function during GC to check if the current GC is an emergency GC.
    /// If it is, the VM binding is recommended to retain fewer objects than normal GCs, to the
    /// extent allowed by the specification of the VM or the language. For example, the VM binding
    /// may choose not to retain objects used for caching. Specifically, for Java virtual machines,
    /// that means not retaining referents of [`SoftReference`][java-soft-ref] which is primarily
    /// designed for implementing memory-sensitive caches.
    ///
    /// [java-soft-ref]: https://docs.oracle.com/en/java/javase/21/docs/api/java.base/java/lang/ref/SoftReference.html
    pub fn is_emergency_collection(&self) -> bool {
        // Delegates to the shared global state.
        self.state.is_emergency_collection()
    }
409
    /// Return true if the current GC was triggered manually by the user/binding.
    pub fn is_user_triggered_collection(&self) -> bool {
        self.state.is_user_triggered_collection()
    }
414
415 /// The application code has requested a collection. This is just a GC hint, and
416 /// we may ignore it.
417 ///
418 /// Returns whether a GC was ran or not. If MMTk triggers a GC, this method will block the
419 /// calling thread and return true when the GC finishes. Otherwise, this method returns
420 /// false immediately.
421 ///
422 /// # Arguments
423 /// * `tls`: The mutator thread that requests the GC
424 /// * `force`: The request cannot be ignored (except for NoGC)
425 /// * `exhaustive`: The requested GC should be exhaustive. This is also a hint.
426 pub fn handle_user_collection_request(
427 &self,
428 tls: VMMutatorThread,
429 force: bool,
430 exhaustive: bool,
431 ) -> bool {
432 if self
433 .gc_trigger
434 .handle_user_collection_request(force, exhaustive)
435 {
436 use crate::vm::Collection;
437 VM::VMCollection::block_for_gc(tls);
438 true
439 } else {
440 false
441 }
442 }
443
    /// MMTK has requested stop-the-world activity (e.g., stw within a concurrent gc).
    // NOTE(review): allow(unused) suggests not every plan/feature combination calls this —
    // TODO confirm before removing the attribute.
    #[allow(unused)]
    pub fn trigger_internal_collection_request(&self) {
        self.gc_trigger.trigger_internal_collection_request();
    }
449
    /// Get a reference to the plan.
    pub fn get_plan(&self) -> &dyn Plan<VM = VM> {
        // SAFETY: shared access through the `UnsafeCell` is sound as long as no caller of
        // `get_plan_mut` holds a conflicting mutable reference (see its safety contract).
        unsafe { &**(self.plan.get()) }
    }
454
    /// Get the plan as mutable reference.
    ///
    /// # Safety
    ///
    /// This is unsafe because the caller must ensure that the plan is not used by other threads.
    #[allow(clippy::mut_from_ref)]
    pub unsafe fn get_plan_mut(&self) -> &mut dyn Plan<VM = VM> {
        // The caller guarantees exclusive access (see `# Safety` above).
        &mut **(self.plan.get())
    }
464
465 /// Get the run time options.
466 pub fn get_options(&self) -> &Options {
467 &self.options
468 }
469
    /// Enumerate objects in all spaces in this MMTK instance.
    ///
    /// The call-back function `f` is called for every object that has the valid object bit (VO
    /// bit), i.e. objects that are allocated in the heap of this MMTK instance, but has not been
    /// reclaimed, yet.
    ///
    /// # Notes about object initialization and finalization
    ///
    /// When this function visits an object, it only guarantees that its VO bit must have been set.
    /// It is not guaranteed if the object has been "fully initialized" in the sense of the
    /// programming language the VM is implementing. For example, the object header and the type
    /// information may not have been written.
    ///
    /// It will also visit objects that have been "finalized" in the sense of the programming
    /// language the VM is implementing, as long as the object has not been reclaimed by the GC,
    /// yet. Be careful. If the object header is destroyed, it may not be safe to access such
    /// objects in the high-level language.
    ///
    /// # Interaction with allocation and GC
    ///
    /// This function does not mutate the heap. It is safe if multiple threads execute this
    /// function concurrently during mutator time.
    ///
    /// It has *undefined behavior* if allocation or GC happens while this function is being
    /// executed. The VM binding must ensure no threads are allocating and GC does not start while
    /// executing this function. One way to do this is stopping all mutators before calling this
    /// function.
    ///
    /// Some high-level languages may provide an API that allows the user to allocate objects and
    /// trigger GC while enumerating objects. One example is [`ObjectSpace::each_object`][os_eo] in
    /// Ruby. The VM binding may use the callback of this function to save all visited object
    /// references and let the user visit those references after this function returns. Make sure
    /// those saved references are in the root set or in an object that will live through GCs before
    /// the high-level language finishes visiting the saved object references.
    ///
    /// [os_eo]: https://docs.ruby-lang.org/en/master/ObjectSpace.html#method-c-each_object
    #[cfg(feature = "vo_bit")]
    pub fn enumerate_objects<F>(&self, f: F)
    where
        F: FnMut(ObjectReference),
    {
        use crate::util::object_enum;

        // Wrap the callback in an enumerator and visit every space of the current plan.
        let mut enumerator = object_enum::ClosureObjectEnumerator::<_, VM>::new(f);
        let plan = self.get_plan();
        plan.for_each_space(&mut |space| {
            space.enumerate_objects(&mut enumerator);
        })
    }
519
    /// Aggregate a hash map of live bytes per space with the space stats to produce
    /// a map of live bytes stats for the spaces, keyed by space name.
    pub(crate) fn aggregate_live_bytes_in_last_gc(
        &self,
        live_bytes_per_space: [usize; MAX_SPACES],
    ) -> HashMap<&'static str, crate::LiveBytesStats> {
        use crate::policy::space::Space;
        let mut ret = HashMap::new();
        self.get_plan().for_each_space(&mut |space: &dyn Space<VM>| {
            let space_name = space.get_name();
            // The space descriptor index selects this space's slot in `live_bytes_per_space`.
            let space_idx = space.get_descriptor().get_index();
            let used_pages = space.reserved_pages();
            // Spaces with no pages in use are omitted from the result.
            if used_pages != 0 {
                let used_bytes = crate::util::conversions::pages_to_bytes(used_pages);
                let live_bytes = live_bytes_per_space[space_idx];
                // Live bytes can never exceed the bytes the space actually has in use.
                debug_assert!(
                    live_bytes <= used_bytes,
                    "Live bytes of objects in {} ({} bytes) is larger than used pages ({} bytes), something is wrong.",
                    space_name, live_bytes, used_bytes
                );
                ret.insert(space_name, crate::LiveBytesStats {
                    live_bytes,
                    used_pages,
                    used_bytes,
                });
            }
        });
        ret
    }
549
550 /// Print VM maps. It will print the memory ranges used by spaces as well as some attributes of
551 /// the spaces.
552 ///
553 /// - "I": The space is immortal. Its objects will never die.
554 /// - "N": The space is non-movable. Its objects will never move.
555 ///
556 /// Arguments:
557 /// * `out`: the place to print the VM maps.
558 /// * `space_name`: If `None`, print all spaces;
559 /// if `Some(n)`, only print the space whose name is `n`.
560 pub fn debug_print_vm_maps(
561 &self,
562 out: &mut impl std::fmt::Write,
563 space_name: Option<&str>,
564 ) -> Result<(), std::fmt::Error> {
565 let mut result_so_far = Ok(());
566 self.get_plan().for_each_space(&mut |space| {
567 if result_so_far.is_ok()
568 && (space_name.is_none() || space_name == Some(space.get_name()))
569 {
570 result_so_far = crate::policy::space::print_vm_map(space, out);
571 }
572 });
573 result_so_far
574 }
575
    /// Initialize object metadata for a VM space object.
    /// Objects in the VM space are allocated/managed by the binding. This function provides a way for
    /// the binding to set object metadata in MMTk for an object in the space.
    #[cfg(feature = "vm_space")]
    pub fn initialize_vm_space_object(&self, object: crate::util::ObjectReference) {
        use crate::policy::sft::SFT;
        // Ask the object model for the object's current size, then let the VM space set up
        // the per-object metadata MMTk needs.
        let bytes = VM::VMObjectModel::get_current_size(object);
        self.get_plan()
            .base()
            .vm_space
            .initialize_object_metadata(object, bytes)
    }
588}
589
590/// A non-mangled function to print object information for debugging purposes. This function can be directly
591/// called from a debugger.
592#[no_mangle]
593pub fn mmtk_debug_print_object(object: crate::util::ObjectReference) {
594 // If the address is unmapped, we cannot access its metadata. Just quit.
595 if !object.to_raw_address().is_mapped() {
596 println!("{} is not mapped in MMTk", object);
597 return;
598 }
599
600 // If the address is not aligned to the object reference size, it is not an object reference.
601 if !object
602 .to_raw_address()
603 .is_aligned_to(crate::util::ObjectReference::ALIGNMENT)
604 {
605 println!(
606 "{} is not properly aligned. It is not an object reference.",
607 object
608 );
609 }
610
611 // Forward to the space
612 let sft = SFT_MAP.get_checked(object.to_raw_address());
613 // Print the space name
614 println!("In {}:", sft.name());
615 // Print object information
616 sft.debug_print_object_info(object);
617}