mmtk/util/address.rs
1use atomic_traits::Atomic;
2use bytemuck::NoUninit;
3
4use std::fmt;
5use std::mem;
6use std::num::NonZeroUsize;
7use std::ops::*;
8use std::sync::atomic::Ordering;
9
10use crate::mmtk::{MMAPPER, SFT_MAP};
11
/// Size in bytes.
pub type ByteSize = usize;
/// Offset in bytes (signed, so it may be negative).
pub type ByteOffset = isize;
16
/// Address represents an arbitrary address. This is designed to represent
/// address and do address arithmetic mostly in a safe way, and to allow
/// marking some operations as unsafe. This type needs to be zero overhead
/// (memory wise and time wise). The idea is from the paper
/// High-level Low-level Programming (VEE09) and JikesRVM.
#[repr(transparent)]
#[derive(Copy, Clone, Eq, Hash, PartialOrd, Ord, PartialEq, NoUninit)]
pub struct Address(usize); // raw address; repr(transparent) keeps it layout-identical to usize
25
26/// Address + ByteSize (positive)
27impl Add<ByteSize> for Address {
28 type Output = Address;
29 fn add(self, offset: ByteSize) -> Address {
30 Address(self.0 + offset)
31 }
32}
33
34/// Address += ByteSize (positive)
35impl AddAssign<ByteSize> for Address {
36 fn add_assign(&mut self, offset: ByteSize) {
37 self.0 += offset;
38 }
39}
40
41/// Address + ByteOffset (positive or negative)
42impl Add<ByteOffset> for Address {
43 type Output = Address;
44 fn add(self, offset: ByteOffset) -> Address {
45 Address((self.0 as isize + offset) as usize)
46 }
47}
48
49/// Address += ByteOffset (positive or negative)
50impl AddAssign<ByteOffset> for Address {
51 fn add_assign(&mut self, offset: ByteOffset) {
52 self.0 = (self.0 as isize + offset) as usize
53 }
54}
55
56/// Address - ByteSize (positive)
57impl Sub<ByteSize> for Address {
58 type Output = Address;
59 fn sub(self, offset: ByteSize) -> Address {
60 Address(self.0 - offset)
61 }
62}
63
64/// Address -= ByteSize (positive)
65impl SubAssign<ByteSize> for Address {
66 fn sub_assign(&mut self, offset: ByteSize) {
67 self.0 -= offset;
68 }
69}
70
71/// Address - Address (the first address must be higher)
72impl Sub<Address> for Address {
73 type Output = ByteSize;
74 fn sub(self, other: Address) -> ByteSize {
75 debug_assert!(
76 self.0 >= other.0,
77 "for (addr_a - addr_b), a({}) needs to be larger than b({})",
78 self,
79 other
80 );
81 self.0 - other.0
82 }
83}
84
85/// Address & mask
86impl BitAnd<usize> for Address {
87 type Output = usize;
88 fn bitand(self, other: usize) -> usize {
89 self.0 & other
90 }
91}
92// Be careful about the return type here. Address & u8 = u8
93// This is different from Address | u8 = usize
94impl BitAnd<u8> for Address {
95 type Output = u8;
96 fn bitand(self, other: u8) -> u8 {
97 (self.0 as u8) & other
98 }
99}
100
101/// Address | mask
102impl BitOr<usize> for Address {
103 type Output = usize;
104 fn bitor(self, other: usize) -> usize {
105 self.0 | other
106 }
107}
108// Be careful about the return type here. Address | u8 = size
109// This is different from Address & u8 = u8
110impl BitOr<u8> for Address {
111 type Output = usize;
112 fn bitor(self, other: u8) -> usize {
113 self.0 | (other as usize)
114 }
115}
116
117/// Address >> shift (get an index)
118impl Shr<usize> for Address {
119 type Output = usize;
120 fn shr(self, shift: usize) -> usize {
121 self.0 >> shift
122 }
123}
124
125/// Address << shift (get an index)
126impl Shl<usize> for Address {
127 type Output = usize;
128 fn shl(self, shift: usize) -> usize {
129 self.0 << shift
130 }
131}
132
133impl Address {
134 /// The lowest possible address.
135 pub const ZERO: Self = Address(0);
136 /// The highest possible address.
137 pub const MAX: Self = Address(usize::MAX);
138
139 /// creates Address from a pointer
140 pub fn from_ptr<T>(ptr: *const T) -> Address {
141 Address(ptr as usize)
142 }
143
144 /// creates Address from a Rust reference
145 pub fn from_ref<T>(r: &T) -> Address {
146 Address(r as *const T as usize)
147 }
148
149 /// creates Address from a mutable pointer
150 pub fn from_mut_ptr<T>(ptr: *mut T) -> Address {
151 Address(ptr as usize)
152 }
153
154 /// creates a null Address (0)
155 /// # Safety
156 /// It is unsafe and the user needs to be aware that they are creating an invalid address.
157 /// The zero address should only be used as unininitialized or sentinel values in performance critical code (where you dont want to use `Option<Address>`).
158 pub const unsafe fn zero() -> Address {
159 Address(0)
160 }
161
162 /// creates an Address of (usize::MAX)
163 /// # Safety
164 /// It is unsafe and the user needs to be aware that they are creating an invalid address.
165 /// The max address should only be used as unininitialized or sentinel values in performance critical code (where you dont want to use `Option<Address>`).
166 pub unsafe fn max() -> Address {
167 Address(usize::MAX)
168 }
169
170 /// creates an arbitrary Address
171 /// # Safety
172 /// It is unsafe and the user needs to be aware that they may create an invalid address.
173 /// This creates arbitrary addresses which may not be valid. This should only be used for hard-coded addresses. Any other uses of this function could be
174 /// replaced with more proper alternatives.
175 pub const unsafe fn from_usize(raw: usize) -> Address {
176 Address(raw)
177 }
178
179 /// shifts the address by N T-typed objects (returns addr + N * size_of(T))
180 pub fn shift<T>(self, offset: isize) -> Self {
181 self + mem::size_of::<T>() as isize * offset
182 }
183
184 // These const functions are duplicated with the operator traits. But we need them,
185 // as we need them to declare constants.
186
187 /// Get the number of bytes between two addresses. The current address needs to be higher than the other address.
188 pub const fn get_extent(self, other: Address) -> ByteSize {
189 self.0 - other.0
190 }
191
192 /// Get the offset from `other` to `self`. The result is negative is `self` is lower than `other`.
193 pub const fn get_offset(self, other: Address) -> ByteOffset {
194 self.0 as isize - other.0 as isize
195 }
196
197 // We implemented the Add trait but we still keep this add function.
198 // The add() function is const fn, and we can use it to declare Address constants.
199 // The Add trait function cannot be const.
200 #[allow(clippy::should_implement_trait)]
201 /// Add an offset to the address.
202 pub const fn add(self, size: usize) -> Address {
203 Address(self.0 + size)
204 }
205
206 /// Wrapping (modular) addition. Computes self + rhs, wrapping around at the boundary of the type.
207 pub const fn wrapping_add(self, size: usize) -> Address {
208 Address(self.0.wrapping_add(size))
209 }
210
211 // We implemented the Sub trait but we still keep this sub function.
212 // The sub() function is const fn, and we can use it to declare Address constants.
213 // The Sub trait function cannot be const.
214 #[allow(clippy::should_implement_trait)]
215 /// Subtract an offset from the address.
216 pub const fn sub(self, size: usize) -> Address {
217 Address(self.0 - size)
218 }
219
220 /// Apply an signed offset to the address.
221 pub const fn offset(self, offset: isize) -> Address {
222 Address(self.0.wrapping_add_signed(offset))
223 }
224
225 /// Bitwise 'and' with a mask.
226 pub const fn and(self, mask: usize) -> usize {
227 self.0 & mask
228 }
229
230 /// Perform a saturating subtract on the Address
231 pub const fn saturating_sub(self, size: usize) -> Address {
232 Address(self.0.saturating_sub(size))
233 }
234
235 /// loads a value of type T from the address
236 /// # Safety
237 /// This could throw a segment fault if the address is invalid
238 pub unsafe fn load<T: Copy>(self) -> T {
239 *(self.0 as *mut T)
240 }
241
242 /// stores a value of type T to the address
243 /// # Safety
244 /// This could throw a segment fault if the address is invalid
245 pub unsafe fn store<T>(self, value: T) {
246 // We use a ptr.write() operation as directly setting the pointer would drop the old value
247 // which may result in unexpected behaviour
248 (self.0 as *mut T).write(value);
249 }
250
251 /// atomic operation: load
252 /// # Safety
253 /// This could throw a segment fault if the address is invalid
254 pub unsafe fn atomic_load<T: Atomic>(self, order: Ordering) -> T::Type {
255 let loc = &*(self.0 as *const T);
256 loc.load(order)
257 }
258
259 /// atomic operation: store
260 /// # Safety
261 /// This could throw a segment fault if the address is invalid
262 pub unsafe fn atomic_store<T: Atomic>(self, val: T::Type, order: Ordering) {
263 let loc = &*(self.0 as *const T);
264 loc.store(val, order)
265 }
266
267 /// atomic operation: compare and exchange usize
268 /// # Safety
269 /// This could throw a segment fault if the address is invalid
270 pub unsafe fn compare_exchange<T: Atomic>(
271 self,
272 old: T::Type,
273 new: T::Type,
274 success: Ordering,
275 failure: Ordering,
276 ) -> Result<T::Type, T::Type> {
277 let loc = &*(self.0 as *const T);
278 loc.compare_exchange(old, new, success, failure)
279 }
280
281 /// is this address zero?
282 pub fn is_zero(self) -> bool {
283 self.0 == 0
284 }
285
286 /// aligns up the address to the given alignment
287 pub const fn align_up(self, align: ByteSize) -> Address {
288 use crate::util::conversions;
289 Address(conversions::raw_align_up(self.0, align))
290 }
291
292 /// aligns down the address to the given alignment
293 pub const fn align_down(self, align: ByteSize) -> Address {
294 use crate::util::conversions;
295 Address(conversions::raw_align_down(self.0, align))
296 }
297
298 /// is this address aligned to the given alignment
299 pub const fn is_aligned_to(self, align: usize) -> bool {
300 use crate::util::conversions;
301 conversions::raw_is_aligned(self.0, align)
302 }
303
304 /// converts the Address to a pointer
305 pub fn to_ptr<T>(self) -> *const T {
306 self.0 as *const T
307 }
308
309 /// converts the Address to a mutable pointer
310 pub fn to_mut_ptr<T>(self) -> *mut T {
311 self.0 as *mut T
312 }
313
314 /// converts the Address to a Rust reference
315 ///
316 /// # Safety
317 /// The caller must guarantee the address actually points to a Rust object.
318 pub unsafe fn as_ref<'a, T>(self) -> &'a T {
319 &*self.to_mut_ptr()
320 }
321
322 /// converts the Address to a mutable Rust reference
323 ///
324 /// # Safety
325 /// The caller must guarantee the address actually points to a Rust object.
326 pub unsafe fn as_mut_ref<'a, T>(self) -> &'a mut T {
327 &mut *self.to_mut_ptr()
328 }
329
330 /// converts the Address to a pointer-sized integer
331 pub const fn as_usize(self) -> usize {
332 self.0
333 }
334
335 /// returns the chunk index for this address
336 pub fn chunk_index(self) -> usize {
337 use crate::util::conversions;
338 conversions::address_to_chunk_index(self)
339 }
340
341 /// return true if the referenced memory is mapped
342 pub fn is_mapped(self) -> bool {
343 if self.0 == 0 {
344 false
345 } else {
346 MMAPPER.is_mapped_address(self)
347 }
348 }
349
350 /// Returns the intersection of the two address ranges. The returned range could
351 /// be empty if there is no intersection between the ranges.
352 pub fn range_intersection(r1: &Range<Address>, r2: &Range<Address>) -> Range<Address> {
353 r1.start.max(r2.start)..r1.end.min(r2.end)
354 }
355}
356
357/// allows print Address as upper-case hex value
358impl fmt::UpperHex for Address {
359 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
360 write!(f, "{:X}", self.0)
361 }
362}
363
364/// allows print Address as lower-case hex value
365impl fmt::LowerHex for Address {
366 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
367 write!(f, "{:x}", self.0)
368 }
369}
370
/// allows Display format the Address (as lower-case hex value with 0x prefix)
impl fmt::Display for Address {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{:#x}", self.0)
    }
}
377
/// allows Debug format the Address (as lower-case hex value with 0x prefix)
impl fmt::Debug for Address {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{:#x}", self.0)
    }
}
384
385impl std::str::FromStr for Address {
386 type Err = std::num::ParseIntError;
387
388 fn from_str(s: &str) -> Result<Self, Self::Err> {
389 let raw: usize = s.parse()?;
390 Ok(Address(raw))
391 }
392}
393
#[cfg(test)]
mod tests {
    //! Unit tests for `Address` alignment helpers and bitwise operators.
    use crate::util::Address;

    #[test]
    fn align_up() {
        unsafe {
            // Already aligned: unchanged.
            assert_eq!(
                Address::from_usize(0x10).align_up(0x10),
                Address::from_usize(0x10)
            );
            // One past a boundary: rounds up to the next boundary.
            assert_eq!(
                Address::from_usize(0x11).align_up(0x10),
                Address::from_usize(0x20)
            );
            assert_eq!(
                Address::from_usize(0x20).align_up(0x10),
                Address::from_usize(0x20)
            );
        }
    }

    #[test]
    fn align_down() {
        unsafe {
            // Already aligned: unchanged.
            assert_eq!(
                Address::from_usize(0x10).align_down(0x10),
                Address::from_usize(0x10)
            );
            // One past a boundary: rounds down to the previous boundary.
            assert_eq!(
                Address::from_usize(0x11).align_down(0x10),
                Address::from_usize(0x10)
            );
            assert_eq!(
                Address::from_usize(0x20).align_down(0x10),
                Address::from_usize(0x20)
            );
        }
    }

    #[test]
    fn is_aligned_to() {
        unsafe {
            assert!(Address::from_usize(0x10).is_aligned_to(0x10));
            assert!(!Address::from_usize(0x11).is_aligned_to(0x10));
            // Aligned to a smaller power of two, but not a larger one.
            assert!(Address::from_usize(0x10).is_aligned_to(0x8));
            assert!(!Address::from_usize(0x10).is_aligned_to(0x20));
        }
    }

    #[test]
    fn bit_and() {
        unsafe {
            // Address & u8 truncates the address to its low byte (result is u8).
            assert_eq!(
                Address::from_usize(0b1111_1111_1100usize) & 0b1010u8,
                0b1000u8
            );
            // Address & usize masks the full word (result is usize).
            assert_eq!(
                Address::from_usize(0b1111_1111_1100usize) & 0b1000_0000_1010usize,
                0b1000_0000_1000usize
            );
        }
    }

    #[test]
    fn bit_or() {
        unsafe {
            // Address | u8 widens the mask; the result is the full usize.
            assert_eq!(
                Address::from_usize(0b1111_1111_1100usize) | 0b1010u8,
                0b1111_1111_1110usize
            );
            assert_eq!(
                Address::from_usize(0b1111_1111_1100usize) | 0b1000_0000_1010usize,
                0b1111_1111_1110usize
            );
        }
    }
}
472
473use crate::vm::VMBinding;
474
475/// `ObjectReference` represents address for an object. Compared with `Address`, operations allowed
476/// on `ObjectReference` are very limited. No address arithmetics are allowed for `ObjectReference`.
477/// The idea is from the paper [Demystifying Magic: High-level Low-level Programming (VEE09)][FBC09]
478/// and [JikesRVM].
479///
480/// In MMTk, `ObjectReference` holds a non-zero address, i.e. its **raw address**. It must satisfy
481/// the following requirements.
482///
483/// - It uniquely references an MMTk object.
484/// - The address must be within the address range of the object it refers to.
485/// - The address must be word-aligned.
486/// - It must be efficient to access object metadata from an `ObjectReference`.
487///
488/// Each `ObjectReference` uniquely identifies exactly one MMTk object. There is no "null
489/// reference" (see below for details).
490///
491/// Conversely, each object has a unique (raw) address used for `ObjectReference`. That address is
492/// nominated by the VM binding right after an object is allocated in the MMTk heap (i.e. the
493/// argument of [`crate::memory_manager::post_alloc`]). The same address is used by all
494/// `ObjectReference` instances that refer to that object until the object is moved, at which time
495/// the VM binding shall choose another address to use as the `ObjectReference` of the new copy (in
496/// [`crate::vm::ObjectModel::copy`] or [`crate::vm::ObjectModel::get_reference_when_copied_to`])
497/// until the object is moved again.
498///
499/// In addition to the raw address, there are also two addresses related to each object allocated in
500/// MMTk heap, namely **starting address** and **header address**. See the
501/// [`crate::vm::ObjectModel`] trait for their precise definition.
502///
503/// The VM binding may, in theory, pick any aligned address within the object, and it doesn't have
504/// to be the starting address. However, during tracing, MMTk will need to access object metadata
505/// from a `ObjectReference`. Particularly, it needs to identify reference fields, and query
506/// information about the object, such as object size. Such information is usually accessed from
507/// object headers. The choice of `ObjectReference` must make such accesses efficient.
508///
509/// Because the raw address is within the object, MMTk will also use the raw address to identify the
510/// space or region (chunk, block, line, etc.) that contains the object, and to access side metadata
511/// and the SFTMap. If a VM binding needs to access side metadata directly (particularly, setting
512/// the "valid-object (VO) bit" in allocation fast paths), it shall use the raw address to compute
513/// the byte and bit address of the metadata bits.
514///
515/// # Notes
516///
517/// ## About VMs own concepts of "object references"
518///
519/// A runtime may define its own concept of "object references" differently from MMTk's
520/// `ObjectReference` type. It may define its object reference as
521///
522/// - the starting address of an object,
523/// - an address inside an object,
524/// - an address at a certain offset outside an object,
525/// - a handle that points to an indirection table entry where a pointer to the object is held, or
526/// - anything else that refers to an object.
527///
/// Regardless, when passing an `ObjectReference` value to MMTk through the API, MMTk expects its
529/// value to satisfy MMTk's definition. This means MMTk's `ObjectReference` may not be the value
530/// held in an object field. Some VM bindings may need to do conversions when passing object
531/// references to MMTk. For example, adding an offset to the VM-level object reference so that the
532/// resulting address is within the object. When using handles, the VM binding may use the *pointer
533/// stored in the entry* of the indirection table instead of the *pointer to the entry* itself as
534/// MMTk-level `ObjectReference`.
535///
536/// ## About null references
537///
538/// An [`ObjectReference`] always refers to an object. Some VMs have special values (such as `null`
539/// in Java) that do not refer to any object. Those values cannot be represented by
540/// `ObjectReference`. When scanning roots and object fields, the VM binding should ignore slots
541/// that do not hold a reference to an object. Specifically, [`crate::vm::slot::Slot::load`]
542/// returns `Option<ObjectReference>`. It can return `None` so that MMTk skips that slot.
543///
544/// `Option<ObjectReference>` should be used for the cases where a non-null object reference may or
/// may not exist. That includes several API functions, including [`crate::vm::slot::Slot::load`].
546/// [`ObjectReference`] is backed by `NonZeroUsize` which cannot be zero, and it has the
547/// `#[repr(transparent)]` attribute. Thanks to [null pointer optimization (NPO)][NPO],
548/// `Option<ObjectReference>` has the same size as `NonZeroUsize` and `usize`.
549///
550/// For the convenience of passing `Option<ObjectReference>` to and from native (C/C++) programs,
551/// mmtk-core provides [`crate::util::api_util::NullableObjectReference`].
552///
553/// ## About the `VMSpace`
554///
/// The `VMSpace` is managed by the VM binding. The VM binding declares ranges of memory as part of
556/// the `VMSpace`, but MMTk never allocates into it. The VM binding allocates objects into the
557/// `VMSpace` (usually by mapping boot-images), and refers to objects in the `VMSpace` using
558/// `ObjectReference`s whose raw addresses point inside those objects (and must be word-aligned,
559/// too). MMTk will access metadata using methods of [`ObjectModel`] like other objects. MMTk also
560/// has side metadata available for objects in the `VMSpace`.
561///
562/// ## About `ObjectReference` pointing outside MMTk spaces
563///
564/// If a VM binding implements [`crate::vm::ActivePlan::vm_trace_object`], `ObjectReference` is
565/// allowed to point to locations outside any MMTk spaces. When tracing objects, such
566/// `ObjectReference` values will be processed by `ActivePlan::vm_trace_object` so that the VM
567/// binding can trace its own allocated objects during GC. However, **this is an experimental
568/// feature**, and may not interact well with other parts of MMTk. Notably, MMTk will not allocate
569/// side metadata for such `ObjectReference`, and attempts to access side metadata with a non-MMTk
570/// `ObjectReference` will result in crash. Use with caution.
571///
572/// [FBC09]: https://dl.acm.org/doi/10.1145/1508293.1508305
573/// [JikesRVM]: https://www.jikesrvm.org/
574/// [`ObjectModel`]: crate::vm::ObjectModel
575/// [NPO]: https://doc.rust-lang.org/std/option/index.html#representation
#[repr(transparent)]
#[derive(Copy, Clone, Eq, Hash, PartialOrd, Ord, PartialEq, NoUninit)]
// NonZeroUsize raw address: with repr(transparent), Option<ObjectReference> is one word (NPO).
pub struct ObjectReference(NonZeroUsize);
579
impl ObjectReference {
    /// The required minimal alignment for object reference. If the object reference's raw address is not aligned to this value,
    /// you will see an assertion failure in the debug build when constructing an object reference instance.
    pub const ALIGNMENT: usize = crate::util::constants::BYTES_IN_ADDRESS;

    /// Cast the object reference to its raw address.
    pub fn to_raw_address(self) -> Address {
        Address(self.0.get())
    }

    /// Cast a raw address to an object reference.
    ///
    /// If `addr` is 0, the result is `None`.
    pub fn from_raw_address(addr: Address) -> Option<ObjectReference> {
        // Alignment is only checked in debug builds.
        debug_assert!(
            addr.is_aligned_to(Self::ALIGNMENT),
            "ObjectReference is required to be word aligned. addr: {addr}"
        );
        NonZeroUsize::new(addr.0).map(ObjectReference)
    }

    /// Like `from_raw_address`, but assume `addr` is not zero. This can be used to elide a check
    /// against zero for performance-critical code.
    ///
    /// # Safety
    ///
    /// This method assumes `addr` is not zero. It should only be used in cases where we know at
    /// compile time that the input cannot be zero. For example, if we compute the address by
    /// adding a positive offset to a non-zero address, we know the result must not be zero.
    pub unsafe fn from_raw_address_unchecked(addr: Address) -> ObjectReference {
        // The non-zero and alignment preconditions are still verified in debug builds.
        debug_assert!(!addr.is_zero());
        debug_assert!(
            addr.is_aligned_to(Self::ALIGNMENT),
            "ObjectReference is required to be word aligned. addr: {addr}"
        );
        ObjectReference(NonZeroUsize::new_unchecked(addr.0))
    }

    /// Get the header base address from an object reference. This method is used by MMTk to get a base address for the
    /// object header, and access the object header. This method is syntactic sugar for [`crate::vm::ObjectModel::ref_to_header`].
    /// See the comments on [`crate::vm::ObjectModel::ref_to_header`].
    pub fn to_header<VM: VMBinding>(self) -> Address {
        use crate::vm::ObjectModel;
        VM::VMObjectModel::ref_to_header(self)
    }

    /// Get the start of the allocation address for the object. This method is used by MMTk to get the start of the allocation
    /// address originally returned from [`crate::memory_manager::alloc`] for the object.
    /// This method is syntactic sugar for [`crate::vm::ObjectModel::ref_to_object_start`]. See comments on [`crate::vm::ObjectModel::ref_to_object_start`].
    pub fn to_object_start<VM: VMBinding>(self) -> Address {
        use crate::vm::ObjectModel;
        let object_start = VM::VMObjectModel::ref_to_object_start(self);
        // Debug-build sanity checks against the binding's declared object model invariants.
        debug_assert!(!VM::VMObjectModel::UNIFIED_OBJECT_REFERENCE_ADDRESS || object_start == self.to_raw_address(), "The binding claims unified object reference address, but for object reference {}, ref_to_object_start() returns {}", self, object_start);
        debug_assert!(
            self.to_raw_address()
                >= object_start + VM::VMObjectModel::OBJECT_REF_OFFSET_LOWER_BOUND,
            "The invariant `object_ref >= object_start + OBJECT_REF_OFFSET_LOWER_BOUND` is violated. \
            object_ref: {}, object_start: {}, OBJECT_REF_OFFSET_LOWER_BOUND: {}",
            self.to_raw_address(),
            object_start,
            VM::VMObjectModel::OBJECT_REF_OFFSET_LOWER_BOUND,
        );
        object_start
    }

    // The queries below dispatch to the policy (space) containing the object, looked up
    // through the SFT map by the object's raw address.

    /// Is the object reachable, determined by the policy?
    ///
    /// # Scope
    ///
    /// This method is primarily used during weak reference processing. It can check if an object
    /// (particularly finalizable objects and objects pointed by weak references) has been reached
    /// by following strong references or weak references of higher strength.
    ///
    /// This method can also be used during tracing for debug purposes.
    ///
    /// When called at other times, particularly during mutator time, the behavior is specific to
    /// the implementation of the plan and policy due to their strategies of metadata clean-up. If
    /// the VM needs to know if any given reference is still valid, it should instead use the valid
    /// object bit (VO-bit) metadata which is enabled by the Cargo feature "vo_bit".
    ///
    /// # Return value
    ///
    /// It returns `true` if one of the following is true:
    ///
    /// 1. The object has been traced (i.e. reached) since tracing started.
    /// 2. The policy conservatively considers the object reachable even though it has not been
    /// traced.
    /// - Particularly, if the plan is generational, this method will return `true` if the
    /// object is mature during nursery GC.
    ///
    /// Due to the conservativeness, if this method returns `true`, it does not necessarily mean the
    /// object must be reachable from roots. In generational GC, mature objects can be unreachable
    /// from roots while the GC chooses not to reclaim their memory during nursery GC. Conversely,
    /// all young objects reachable from the remembered set are retained even though some mature
    /// objects in the remembered set can be unreachable in the first place. (This is known as
    /// *nepotism* in GC literature.)
    ///
    /// Note: Objects in ImmortalSpace may have `is_live = true` but are actually unreachable.
    pub fn is_reachable(self) -> bool {
        // SAFETY(review): get_unchecked elides SFT map checking; presumably sound because an
        // ObjectReference always points into a space known to the SFT map -- confirm.
        unsafe { SFT_MAP.get_unchecked(self.to_raw_address()) }.is_reachable(self)
    }

    /// Is the object live, determined by the policy?
    pub fn is_live(self) -> bool {
        unsafe { SFT_MAP.get_unchecked(self.to_raw_address()) }.is_live(self)
    }

    /// Can the object be moved?
    pub fn is_movable(self) -> bool {
        unsafe { SFT_MAP.get_unchecked(self.to_raw_address()) }.is_movable()
    }

    /// Get forwarding pointer if the object is forwarded.
    pub fn get_forwarded_object(self) -> Option<Self> {
        unsafe { SFT_MAP.get_unchecked(self.to_raw_address()) }.get_forwarded_object(self)
    }

    /// Is the object in any MMTk spaces?
    pub fn is_in_any_space(self) -> bool {
        unsafe { SFT_MAP.get_unchecked(self.to_raw_address()) }.is_in_space(self)
    }

    /// Is the object sane?
    #[cfg(feature = "sanity")]
    pub fn is_sane(self) -> bool {
        unsafe { SFT_MAP.get_unchecked(self.to_raw_address()) }.is_sane()
    }
}
708
/// allows print ObjectReference as upper-case hex value
impl fmt::UpperHex for ObjectReference {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{:X}", self.0)
    }
}
715
/// allows print ObjectReference as lower-case hex value
impl fmt::LowerHex for ObjectReference {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{:x}", self.0)
    }
}
722
/// allows Display format the ObjectReference (as lower-case hex value with 0x prefix)
impl fmt::Display for ObjectReference {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{:#x}", self.0)
    }
}
729
/// allows Debug format the ObjectReference (as lower-case hex value with 0x prefix)
impl fmt::Debug for ObjectReference {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{:#x}", self.0)
    }
}
735}