mmtk/util/
address.rs

use atomic_traits::Atomic;
use bytemuck::NoUninit;

use std::fmt;
use std::mem;
use std::num::NonZeroUsize;
use std::ops::*;
use std::sync::atomic::Ordering;

use crate::mmtk::{MMAPPER, SFT_MAP};

/// size in bytes
pub type ByteSize = usize;
/// offset in bytes
pub type ByteOffset = isize;

/// Address represents an arbitrary address. This is designed to represent
/// an address, do address arithmetic mostly in a safe way, and mark some
/// operations as unsafe. This type needs to be zero overhead
/// (memory-wise and time-wise). The idea is from the paper
/// Demystifying Magic: High-level Low-level Programming (VEE09) and JikesRVM.
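///
/// # Example
///
/// A minimal sketch of `Address` arithmetic (illustrative only, not run as a doctest):
///
/// ```rust,ignore
/// use mmtk::util::Address;
///
/// let a = unsafe { Address::from_usize(0x1000) };
/// let b = a + 8usize;   // Address + ByteSize -> Address
/// assert_eq!(b - a, 8); // Address - Address  -> ByteSize
/// assert!(b.is_aligned_to(8));
/// ```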
#[repr(transparent)]
#[derive(Copy, Clone, Eq, Hash, PartialOrd, Ord, PartialEq, NoUninit)]
pub struct Address(usize);

/// Address + ByteSize (positive)
impl Add<ByteSize> for Address {
    type Output = Address;
    fn add(self, offset: ByteSize) -> Address {
        Address(self.0 + offset)
    }
}

/// Address += ByteSize (positive)
impl AddAssign<ByteSize> for Address {
    fn add_assign(&mut self, offset: ByteSize) {
        self.0 += offset;
    }
}

/// Address + ByteOffset (positive or negative)
impl Add<ByteOffset> for Address {
    type Output = Address;
    fn add(self, offset: ByteOffset) -> Address {
        Address((self.0 as isize + offset) as usize)
    }
}

/// Address += ByteOffset (positive or negative)
impl AddAssign<ByteOffset> for Address {
    fn add_assign(&mut self, offset: ByteOffset) {
        self.0 = (self.0 as isize + offset) as usize
    }
}

/// Address - ByteSize (positive)
impl Sub<ByteSize> for Address {
    type Output = Address;
    fn sub(self, offset: ByteSize) -> Address {
        Address(self.0 - offset)
    }
}

/// Address -= ByteSize (positive)
impl SubAssign<ByteSize> for Address {
    fn sub_assign(&mut self, offset: ByteSize) {
        self.0 -= offset;
    }
}

/// Address - Address (the first address must not be lower than the second)
impl Sub<Address> for Address {
    type Output = ByteSize;
    fn sub(self, other: Address) -> ByteSize {
        debug_assert!(
            self.0 >= other.0,
            "for (addr_a - addr_b), a({}) needs to be greater than or equal to b({})",
            self,
            other
        );
        self.0 - other.0
    }
}

/// Address & mask
impl BitAnd<usize> for Address {
    type Output = usize;
    fn bitand(self, other: usize) -> usize {
        self.0 & other
    }
}
// Be careful about the return type here. Address & u8 = u8
// This is different from Address | u8 = usize
impl BitAnd<u8> for Address {
    type Output = u8;
    fn bitand(self, other: u8) -> u8 {
        (self.0 as u8) & other
    }
}

/// Address | mask
impl BitOr<usize> for Address {
    type Output = usize;
    fn bitor(self, other: usize) -> usize {
        self.0 | other
    }
}
// Be careful about the return type here. Address | u8 = usize
// This is different from Address & u8 = u8
impl BitOr<u8> for Address {
    type Output = usize;
    fn bitor(self, other: u8) -> usize {
        self.0 | (other as usize)
    }
}

/// Address >> shift (get an index)
impl Shr<usize> for Address {
    type Output = usize;
    fn shr(self, shift: usize) -> usize {
        self.0 >> shift
    }
}

/// Address << shift (multiply by a power of two)
impl Shl<usize> for Address {
    type Output = usize;
    fn shl(self, shift: usize) -> usize {
        self.0 << shift
    }
}

impl Address {
    /// The lowest possible address.
    pub const ZERO: Self = Address(0);
    /// The highest possible address.
    pub const MAX: Self = Address(usize::MAX);

    /// creates Address from a pointer
    pub fn from_ptr<T>(ptr: *const T) -> Address {
        Address(ptr as usize)
    }

    /// creates Address from a Rust reference
    pub fn from_ref<T>(r: &T) -> Address {
        Address(r as *const T as usize)
    }

    /// creates Address from a mutable pointer
    pub fn from_mut_ptr<T>(ptr: *mut T) -> Address {
        Address(ptr as usize)
    }

    /// creates a null Address (0)
    /// # Safety
    /// It is unsafe and the user needs to be aware that they are creating an invalid address.
    /// The zero address should only be used as an uninitialized or sentinel value in performance-critical code (where you don't want to use `Option<Address>`).
    pub const unsafe fn zero() -> Address {
        Address(0)
    }

    /// creates an Address of (usize::MAX)
    /// # Safety
    /// It is unsafe and the user needs to be aware that they are creating an invalid address.
    /// The max address should only be used as an uninitialized or sentinel value in performance-critical code (where you don't want to use `Option<Address>`).
    pub unsafe fn max() -> Address {
        Address(usize::MAX)
    }

    /// creates an arbitrary Address
    /// # Safety
    /// It is unsafe and the user needs to be aware that they may create an invalid address.
    /// This creates arbitrary addresses which may not be valid. This should only be used for hard-coded addresses; any other use of this function could be
    /// replaced with a more proper alternative.
    pub const unsafe fn from_usize(raw: usize) -> Address {
        Address(raw)
    }

    /// shifts the address by N T-typed objects (returns addr + N * size_of(T))
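    ///
    /// A minimal sketch, assuming `addr` is an existing `Address` (illustrative only,
    /// not run as a doctest):
    ///
    /// ```rust,ignore
    /// // Advance past two u64 values: addr + 2 * size_of::<u64>() = addr + 16 bytes.
    /// let next = addr.shift::<u64>(2);
    /// assert_eq!(next - addr, 16);
    /// ```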
    pub fn shift<T>(self, offset: isize) -> Self {
        self + mem::size_of::<T>() as isize * offset
    }

    // These const functions duplicate the operator traits, but we need them
    // to declare constants.

    /// Get the number of bytes between two addresses. The current address needs to be higher than the other address.
    pub const fn get_extent(self, other: Address) -> ByteSize {
        self.0 - other.0
    }

    /// Get the offset from `other` to `self`. The result is negative if `self` is lower than `other`.
    pub const fn get_offset(self, other: Address) -> ByteOffset {
        self.0 as isize - other.0 as isize
    }

    // We implemented the Add trait but we still keep this add function.
    // The add() function is const fn, and we can use it to declare Address constants.
    // The Add trait function cannot be const.
    #[allow(clippy::should_implement_trait)]
    /// Add an offset to the address.
    pub const fn add(self, size: usize) -> Address {
        Address(self.0 + size)
    }

    // We implemented the Sub trait but we still keep this sub function.
    // The sub() function is const fn, and we can use it to declare Address constants.
    // The Sub trait function cannot be const.
    #[allow(clippy::should_implement_trait)]
    /// Subtract an offset from the address.
    pub const fn sub(self, size: usize) -> Address {
        Address(self.0 - size)
    }

    /// Apply a signed offset to the address.
    pub const fn offset(self, offset: isize) -> Address {
        Address(self.0.wrapping_add_signed(offset))
    }

    /// Bitwise 'and' with a mask.
    pub const fn and(self, mask: usize) -> usize {
        self.0 & mask
    }

    /// Perform a saturating subtract on the Address
    pub const fn saturating_sub(self, size: usize) -> Address {
        Address(self.0.saturating_sub(size))
    }

    /// loads a value of type T from the address
    /// # Safety
    /// This could cause a segmentation fault if the address is invalid
    pub unsafe fn load<T: Copy>(self) -> T {
        *(self.0 as *mut T)
    }

    /// stores a value of type T to the address
    /// # Safety
    /// This could cause a segmentation fault if the address is invalid
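    ///
    /// A minimal sketch, assuming `addr` points to valid, writable memory for a `usize`
    /// (illustrative only, not run as a doctest):
    ///
    /// ```rust,ignore
    /// unsafe {
    ///     addr.store::<usize>(42);
    ///     assert_eq!(addr.load::<usize>(), 42);
    /// }
    /// ```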
    pub unsafe fn store<T>(self, value: T) {
        // We use a ptr.write() operation as directly setting the pointer would drop the old value
        // which may result in unexpected behaviour
        (self.0 as *mut T).write(value);
    }

    /// atomic operation: load
    /// # Safety
    /// This could cause a segmentation fault if the address is invalid
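    ///
    /// A minimal sketch, assuming `addr` points to a valid, initialized `AtomicUsize`
    /// (illustrative only, not run as a doctest):
    ///
    /// ```rust,ignore
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    /// let v: usize = unsafe { addr.atomic_load::<AtomicUsize>(Ordering::SeqCst) };
    /// ```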
    pub unsafe fn atomic_load<T: Atomic>(self, order: Ordering) -> T::Type {
        let loc = &*(self.0 as *const T);
        loc.load(order)
    }

    /// atomic operation: store
    /// # Safety
    /// This could cause a segmentation fault if the address is invalid
    pub unsafe fn atomic_store<T: Atomic>(self, val: T::Type, order: Ordering) {
        let loc = &*(self.0 as *const T);
        loc.store(val, order)
    }

    /// atomic operation: compare and exchange
    /// # Safety
    /// This could cause a segmentation fault if the address is invalid
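    ///
    /// A minimal sketch, assuming `addr` points to a valid `AtomicUsize` (illustrative only,
    /// not run as a doctest):
    ///
    /// ```rust,ignore
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    /// // Atomically replace 0 with 1; on failure, Err holds the value actually observed.
    /// let res = unsafe {
    ///     addr.compare_exchange::<AtomicUsize>(0, 1, Ordering::SeqCst, Ordering::SeqCst)
    /// };
    /// ```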
    pub unsafe fn compare_exchange<T: Atomic>(
        self,
        old: T::Type,
        new: T::Type,
        success: Ordering,
        failure: Ordering,
    ) -> Result<T::Type, T::Type> {
        let loc = &*(self.0 as *const T);
        loc.compare_exchange(old, new, success, failure)
    }

    /// is this address zero?
    pub fn is_zero(self) -> bool {
        self.0 == 0
    }

    /// aligns up the address to the given alignment
    pub const fn align_up(self, align: ByteSize) -> Address {
        use crate::util::conversions;
        Address(conversions::raw_align_up(self.0, align))
    }

    /// aligns down the address to the given alignment
    pub const fn align_down(self, align: ByteSize) -> Address {
        use crate::util::conversions;
        Address(conversions::raw_align_down(self.0, align))
    }

    /// is this address aligned to the given alignment?
    pub const fn is_aligned_to(self, align: usize) -> bool {
        use crate::util::conversions;
        conversions::raw_is_aligned(self.0, align)
    }

    /// converts the Address to a pointer
    pub fn to_ptr<T>(self) -> *const T {
        self.0 as *const T
    }

    /// converts the Address to a mutable pointer
    pub fn to_mut_ptr<T>(self) -> *mut T {
        self.0 as *mut T
    }

    /// converts the Address to a Rust reference
    ///
    /// # Safety
    /// The caller must guarantee the address actually points to a Rust object.
    pub unsafe fn as_ref<'a, T>(self) -> &'a T {
        &*self.to_mut_ptr()
    }

    /// converts the Address to a mutable Rust reference
    ///
    /// # Safety
    /// The caller must guarantee the address actually points to a Rust object.
    pub unsafe fn as_mut_ref<'a, T>(self) -> &'a mut T {
        &mut *self.to_mut_ptr()
    }

    /// converts the Address to a pointer-sized integer
    pub const fn as_usize(self) -> usize {
        self.0
    }

    /// returns the chunk index for this address
    pub fn chunk_index(self) -> usize {
        use crate::util::conversions;
        conversions::address_to_chunk_index(self)
    }

    /// returns true if the referenced memory is mapped
    pub fn is_mapped(self) -> bool {
        if self.0 == 0 {
            false
        } else {
            MMAPPER.is_mapped_address(self)
        }
    }

    /// Returns the intersection of the two address ranges. The returned range could
    /// be empty if there is no intersection between the ranges.
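    ///
    /// A minimal sketch (illustrative only, not run as a doctest):
    ///
    /// ```rust,ignore
    /// let r1 = unsafe { Address::from_usize(0x1000)..Address::from_usize(0x3000) };
    /// let r2 = unsafe { Address::from_usize(0x2000)..Address::from_usize(0x4000) };
    /// let i = Address::range_intersection(&r1, &r2);
    /// assert_eq!(i.start, unsafe { Address::from_usize(0x2000) });
    /// assert_eq!(i.end, unsafe { Address::from_usize(0x3000) });
    /// ```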
    pub fn range_intersection(r1: &Range<Address>, r2: &Range<Address>) -> Range<Address> {
        r1.start.max(r2.start)..r1.end.min(r2.end)
    }
}

/// allows printing the Address as an upper-case hex value
impl fmt::UpperHex for Address {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{:X}", self.0)
    }
}

/// allows printing the Address as a lower-case hex value
impl fmt::LowerHex for Address {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{:x}", self.0)
    }
}

/// allows Display to format the Address (as a lower-case hex value with a 0x prefix)
impl fmt::Display for Address {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{:#x}", self.0)
    }
}

/// allows Debug to format the Address (as a lower-case hex value with a 0x prefix)
impl fmt::Debug for Address {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{:#x}", self.0)
    }
}

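/// Parses an `Address` from a decimal string, delegating to `usize`'s `FromStr`.
///
/// A minimal sketch (illustrative only, not run as a doctest):
///
/// ```rust,ignore
/// let addr: Address = "4096".parse().unwrap();
/// assert_eq!(addr.as_usize(), 4096);
/// ```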
impl std::str::FromStr for Address {
    type Err = std::num::ParseIntError;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let raw: usize = s.parse()?;
        Ok(Address(raw))
    }
}

#[cfg(test)]
mod tests {
    use crate::util::Address;

    #[test]
    fn align_up() {
        unsafe {
            assert_eq!(
                Address::from_usize(0x10).align_up(0x10),
                Address::from_usize(0x10)
            );
            assert_eq!(
                Address::from_usize(0x11).align_up(0x10),
                Address::from_usize(0x20)
            );
            assert_eq!(
                Address::from_usize(0x20).align_up(0x10),
                Address::from_usize(0x20)
            );
        }
    }

    #[test]
    fn align_down() {
        unsafe {
            assert_eq!(
                Address::from_usize(0x10).align_down(0x10),
                Address::from_usize(0x10)
            );
            assert_eq!(
                Address::from_usize(0x11).align_down(0x10),
                Address::from_usize(0x10)
            );
            assert_eq!(
                Address::from_usize(0x20).align_down(0x10),
                Address::from_usize(0x20)
            );
        }
    }

    #[test]
    fn is_aligned_to() {
        unsafe {
            assert!(Address::from_usize(0x10).is_aligned_to(0x10));
            assert!(!Address::from_usize(0x11).is_aligned_to(0x10));
            assert!(Address::from_usize(0x10).is_aligned_to(0x8));
            assert!(!Address::from_usize(0x10).is_aligned_to(0x20));
        }
    }

    #[test]
    fn bit_and() {
        unsafe {
            assert_eq!(
                Address::from_usize(0b1111_1111_1100usize) & 0b1010u8,
                0b1000u8
            );
            assert_eq!(
                Address::from_usize(0b1111_1111_1100usize) & 0b1000_0000_1010usize,
                0b1000_0000_1000usize
            );
        }
    }

    #[test]
    fn bit_or() {
        unsafe {
            assert_eq!(
                Address::from_usize(0b1111_1111_1100usize) | 0b1010u8,
                0b1111_1111_1110usize
            );
            assert_eq!(
                Address::from_usize(0b1111_1111_1100usize) | 0b1000_0000_1010usize,
                0b1111_1111_1110usize
            );
        }
    }
}

use crate::vm::VMBinding;

/// `ObjectReference` represents the address of an object. Compared with `Address`, the operations
/// allowed on `ObjectReference` are very limited. No address arithmetic is allowed for `ObjectReference`.
/// The idea is from the paper [Demystifying Magic: High-level Low-level Programming (VEE09)][FBC09]
/// and [JikesRVM].
///
/// In MMTk, `ObjectReference` holds a non-zero address, i.e. its **raw address**.  It must satisfy
/// the following requirements.
///
/// -   It uniquely references an MMTk object.
/// -   The address must be within the address range of the object it refers to.
/// -   The address must be word-aligned.
/// -   It must be efficient to access object metadata from an `ObjectReference`.
///
/// Each `ObjectReference` uniquely identifies exactly one MMTk object.  There is no "null
/// reference" (see below for details).
///
/// Conversely, each object has a unique (raw) address used for `ObjectReference`.  That address is
/// nominated by the VM binding right after an object is allocated in the MMTk heap (i.e. the
/// argument of [`crate::memory_manager::post_alloc`]).  The same address is used by all
/// `ObjectReference` instances that refer to that object until the object is moved, at which time
/// the VM binding shall choose another address to use as the `ObjectReference` of the new copy (in
/// [`crate::vm::ObjectModel::copy`] or [`crate::vm::ObjectModel::get_reference_when_copied_to`])
/// until the object is moved again.
///
/// In addition to the raw address, there are also two addresses related to each object allocated in
/// the MMTk heap, namely the **starting address** and the **header address**.  See the
/// [`crate::vm::ObjectModel`] trait for their precise definitions.
///
/// The VM binding may, in theory, pick any aligned address within the object, and it doesn't have
/// to be the starting address.  However, during tracing, MMTk will need to access object metadata
/// from an `ObjectReference`.  Particularly, it needs to identify reference fields, and query
/// information about the object, such as object size.  Such information is usually accessed from
/// object headers.  The choice of `ObjectReference` must make such accesses efficient.
///
/// Because the raw address is within the object, MMTk will also use the raw address to identify the
/// space or region (chunk, block, line, etc.) that contains the object, and to access side metadata
/// and the SFTMap.  If a VM binding needs to access side metadata directly (particularly, setting
/// the "valid-object (VO) bit" in allocation fast paths), it shall use the raw address to compute
/// the byte and bit address of the metadata bits.
///
/// # Notes
///
/// ## About VMs' own concepts of "object references"
///
/// A runtime may define its own concept of "object references" differently from MMTk's
/// `ObjectReference` type.  It may define its object reference as
///
/// -   the starting address of an object,
/// -   an address inside an object,
/// -   an address at a certain offset outside an object,
/// -   a handle that points to an indirection table entry where a pointer to the object is held, or
/// -   anything else that refers to an object.
///
/// Regardless, when passing an `ObjectReference` value to MMTk through the API, MMTk expects its
/// value to satisfy MMTk's definition.  This means MMTk's `ObjectReference` may not be the value
/// held in an object field.  Some VM bindings may need to do conversions when passing object
/// references to MMTk.  For example, adding an offset to the VM-level object reference so that the
/// resulting address is within the object.  When using handles, the VM binding may use the *pointer
/// stored in the entry* of the indirection table instead of the *pointer to the entry* itself as the
/// MMTk-level `ObjectReference`.
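///
/// A hypothetical sketch of such a conversion (`vm_ref_addr` and `VM_REF_OFFSET` are
/// illustrative names, not part of the API; not run as a doctest):
///
/// ```rust,ignore
/// // Suppose the VM-level reference points one word before the object.
/// const VM_REF_OFFSET: usize = 8;
/// let mmtk_ref = ObjectReference::from_raw_address(vm_ref_addr + VM_REF_OFFSET).unwrap();
/// ```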
///
/// ## About null references
///
/// An [`ObjectReference`] always refers to an object.  Some VMs have special values (such as `null`
/// in Java) that do not refer to any object.  Those values cannot be represented by
/// `ObjectReference`.  When scanning roots and object fields, the VM binding should ignore slots
/// that do not hold a reference to an object.  Specifically, [`crate::vm::slot::Slot::load`]
/// returns `Option<ObjectReference>`.  It can return `None` so that MMTk skips that slot.
///
/// `Option<ObjectReference>` should be used for the cases where a non-null object reference may or
/// may not exist.  That includes several API functions, including [`crate::vm::slot::Slot::load`].
/// [`ObjectReference`] is backed by `NonZeroUsize` which cannot be zero, and it has the
/// `#[repr(transparent)]` attribute. Thanks to [null pointer optimization (NPO)][NPO],
/// `Option<ObjectReference>` has the same size as `NonZeroUsize` and `usize`.
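///
/// A minimal sketch of that size guarantee (illustrative only, not run as a doctest):
///
/// ```rust,ignore
/// use std::mem::size_of;
/// assert_eq!(size_of::<Option<ObjectReference>>(), size_of::<usize>());
/// ```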
///
/// For the convenience of passing `Option<ObjectReference>` to and from native (C/C++) programs,
/// mmtk-core provides [`crate::util::api_util::NullableObjectReference`].
///
/// ## About the `VMSpace`
///
/// The `VMSpace` is managed by the VM binding.  The VM binding declares ranges of memory as part of
/// the `VMSpace`, but MMTk never allocates into it.  The VM binding allocates objects into the
/// `VMSpace` (usually by mapping boot-images), and refers to objects in the `VMSpace` using
/// `ObjectReference`s whose raw addresses point inside those objects (and must be word-aligned,
/// too).  MMTk will access metadata using methods of [`ObjectModel`] like other objects.  MMTk also
/// has side metadata available for objects in the `VMSpace`.
///
/// ## About `ObjectReference` pointing outside MMTk spaces
///
/// If a VM binding implements [`crate::vm::ActivePlan::vm_trace_object`], `ObjectReference` is
/// allowed to point to locations outside any MMTk spaces.  When tracing objects, such
/// `ObjectReference` values will be processed by `ActivePlan::vm_trace_object` so that the VM
/// binding can trace its own allocated objects during GC.  However, **this is an experimental
/// feature**, and may not interact well with other parts of MMTk.  Notably, MMTk will not allocate
/// side metadata for such `ObjectReference`s, and attempts to access side metadata with a non-MMTk
/// `ObjectReference` will result in a crash. Use with caution.
///
/// [FBC09]: https://dl.acm.org/doi/10.1145/1508293.1508305
/// [JikesRVM]: https://www.jikesrvm.org/
/// [`ObjectModel`]: crate::vm::ObjectModel
/// [NPO]: https://doc.rust-lang.org/std/option/index.html#representation
#[repr(transparent)]
#[derive(Copy, Clone, Eq, Hash, PartialOrd, Ord, PartialEq, NoUninit)]
pub struct ObjectReference(NonZeroUsize);

impl ObjectReference {
    /// The required minimal alignment for an object reference. If the object reference's raw address is not aligned to this value,
    /// you will see an assertion failure in the debug build when constructing an object reference instance.
    pub const ALIGNMENT: usize = crate::util::constants::BYTES_IN_ADDRESS;

    /// Cast the object reference to its raw address.
    pub fn to_raw_address(self) -> Address {
        Address(self.0.get())
    }

    /// Cast a raw address to an object reference.
    ///
    /// If `addr` is 0, the result is `None`.
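    ///
    /// A minimal sketch, assuming `addr` is a word-aligned `Address` (illustrative only,
    /// not run as a doctest):
    ///
    /// ```rust,ignore
    /// let objref = ObjectReference::from_raw_address(addr)
    ///     .expect("addr must not be zero");
    /// ```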
    pub fn from_raw_address(addr: Address) -> Option<ObjectReference> {
        debug_assert!(
            addr.is_aligned_to(Self::ALIGNMENT),
            "ObjectReference is required to be word aligned.  addr: {addr}"
        );
        NonZeroUsize::new(addr.0).map(ObjectReference)
    }

    /// Like `from_raw_address`, but assumes `addr` is not zero.  This can be used to elide a check
    /// against zero for performance-critical code.
    ///
    /// # Safety
    ///
    /// This method assumes `addr` is not zero.  It should only be used in cases where we know at
    /// compile time that the input cannot be zero.  For example, if we compute the address by
    /// adding a positive offset to a non-zero address, we know the result must not be zero.
    pub unsafe fn from_raw_address_unchecked(addr: Address) -> ObjectReference {
        debug_assert!(!addr.is_zero());
        debug_assert!(
            addr.is_aligned_to(Self::ALIGNMENT),
            "ObjectReference is required to be word aligned.  addr: {addr}"
        );
        ObjectReference(NonZeroUsize::new_unchecked(addr.0))
    }

    /// Get the header base address from an object reference. This method is used by MMTk to get a base address for the
    /// object header, and access the object header. This method is syntactic sugar for [`crate::vm::ObjectModel::ref_to_header`].
    /// See the comments on [`crate::vm::ObjectModel::ref_to_header`].
    pub fn to_header<VM: VMBinding>(self) -> Address {
        use crate::vm::ObjectModel;
        VM::VMObjectModel::ref_to_header(self)
    }

    /// Get the start of the allocation address for the object. This method is used by MMTk to get the start of the allocation
    /// address originally returned from [`crate::memory_manager::alloc`] for the object.
    /// This method is syntactic sugar for [`crate::vm::ObjectModel::ref_to_object_start`]. See comments on [`crate::vm::ObjectModel::ref_to_object_start`].
    pub fn to_object_start<VM: VMBinding>(self) -> Address {
        use crate::vm::ObjectModel;
        let object_start = VM::VMObjectModel::ref_to_object_start(self);
        debug_assert!(!VM::VMObjectModel::UNIFIED_OBJECT_REFERENCE_ADDRESS || object_start == self.to_raw_address(), "The binding claims unified object reference address, but for object reference {}, ref_to_object_start() returns {}", self, object_start);
        debug_assert!(
            self.to_raw_address()
                >= object_start + VM::VMObjectModel::OBJECT_REF_OFFSET_LOWER_BOUND,
            "The invariant `object_ref >= object_start + OBJECT_REF_OFFSET_LOWER_BOUND` is violated. \
            object_ref: {}, object_start: {}, OBJECT_REF_OFFSET_LOWER_BOUND: {}",
            self.to_raw_address(),
            object_start,
            VM::VMObjectModel::OBJECT_REF_OFFSET_LOWER_BOUND,
        );
        object_start
    }

    /// Is the object reachable, as determined by the policy?
    ///
    /// # Scope
    ///
    /// This method is primarily used during weak reference processing.  It can check if an object
    /// (particularly finalizable objects and objects pointed to by weak references) has been reached
    /// by following strong references or weak references of higher strength.
    ///
    /// This method can also be used during tracing for debug purposes.
    ///
    /// When called at other times, particularly during mutator time, the behavior is specific to
    /// the implementation of the plan and policy due to their strategies of metadata clean-up.  If
    /// the VM needs to know whether any given reference is still valid, it should instead use the
    /// valid object bit (VO bit) metadata, which is enabled by the Cargo feature "vo_bit".
    ///
    /// # Return value
    ///
    /// It returns `true` if one of the following is true:
    ///
    /// 1.  The object has been traced (i.e. reached) since tracing started.
    /// 2.  The policy conservatively considers the object reachable even though it has not been
    ///     traced.
    ///     -   Particularly, if the plan is generational, this method will return `true` if the
    ///         object is mature during nursery GC.
    ///
    /// Due to this conservativeness, if this method returns `true`, it does not necessarily mean the
    /// object must be reachable from roots.  In generational GC, mature objects can be unreachable
    /// from roots while the GC chooses not to reclaim their memory during nursery GC. Conversely,
    /// all young objects reachable from the remembered set are retained even though some mature
    /// objects in the remembered set can be unreachable in the first place.  (This is known as
    /// *nepotism* in the GC literature.)
    ///
    /// Note: Objects in ImmortalSpace may have `is_live = true` but still be unreachable.
    pub fn is_reachable(self) -> bool {
        unsafe { SFT_MAP.get_unchecked(self.to_raw_address()) }.is_reachable(self)
    }

    /// Is the object live, as determined by the policy?
    pub fn is_live(self) -> bool {
        unsafe { SFT_MAP.get_unchecked(self.to_raw_address()) }.is_live(self)
    }

    /// Can the object be moved?
    pub fn is_movable(self) -> bool {
        unsafe { SFT_MAP.get_unchecked(self.to_raw_address()) }.is_movable()
    }

    /// Get the forwarding pointer if the object is forwarded.
    pub fn get_forwarded_object(self) -> Option<Self> {
        unsafe { SFT_MAP.get_unchecked(self.to_raw_address()) }.get_forwarded_object(self)
    }

    /// Is the object in any MMTk space?
    pub fn is_in_any_space(self) -> bool {
        unsafe { SFT_MAP.get_unchecked(self.to_raw_address()) }.is_in_space(self)
    }

    /// Is the object sane?
    #[cfg(feature = "sanity")]
    pub fn is_sane(self) -> bool {
        unsafe { SFT_MAP.get_unchecked(self.to_raw_address()) }.is_sane()
    }
}

/// allows printing the ObjectReference as an upper-case hex value
impl fmt::UpperHex for ObjectReference {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{:X}", self.0)
    }
}

/// allows printing the ObjectReference as a lower-case hex value
impl fmt::LowerHex for ObjectReference {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{:x}", self.0)
    }
}

/// allows Display to format the ObjectReference (as a lower-case hex value with a 0x prefix)
impl fmt::Display for ObjectReference {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{:#x}", self.0)
    }
}

/// allows Debug to format the ObjectReference (as a lower-case hex value with a 0x prefix)
impl fmt::Debug for ObjectReference {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{:#x}", self.0)
    }
}