mmtk/util/heap/
space_descriptor.rs

1use bytemuck::Zeroable;
2
3use crate::util::heap::layout::vm_layout::{self, vm_layout};
4use crate::util::Address;
5use std::sync::atomic::{AtomicUsize, Ordering};
6
// A `SpaceDescriptor` packs several fields into a single `usize`.  For a
// contiguous space (non-forced layout), from the least significant bit up:
//   [0..2)   type tag (shared / contiguous / contiguous-hi)
//   [2..12)  size of the space, in chunks
//   [12..17) exponent of the start-address encoding
//   [17..31) mantissa of the start-address encoding
// For a discontiguous space, everything above the type tag is a space index.
const TYPE_BITS: usize = 2;
#[allow(unused)]
const TYPE_SHARED: usize = 0;
const TYPE_CONTIGUOUS: usize = 1;
// A contiguous space whose end is the heap end.  Note that this tag also has
// the `TYPE_CONTIGUOUS` bit set.
const TYPE_CONTIGUOUS_HI: usize = 3;
const TYPE_MASK: usize = (1 << TYPE_BITS) - 1;
const SIZE_SHIFT: usize = TYPE_BITS;
const SIZE_BITS: usize = 10;
const SIZE_MASK: usize = ((1 << SIZE_BITS) - 1) << SIZE_SHIFT;
const EXPONENT_SHIFT: usize = SIZE_SHIFT + SIZE_BITS;
const EXPONENT_BITS: usize = 5;
const EXPONENT_MASK: usize = ((1 << EXPONENT_BITS) - 1) << EXPONENT_SHIFT;
const MANTISSA_SHIFT: usize = EXPONENT_SHIFT + EXPONENT_BITS;
const MANTISSA_BITS: usize = 14;
// FIXME: The constant `i32::BITS` is inherited from JikesRVM. It looks like the current
// `SpaceDescriptor` encoding can only encode 32-bit address spaces where each space can only have
// less than 1024 (`1 << SIZE_BITS`) chunks. In this case, `BASE_EXPONENT` will depend on
// `i32::BITS`. If we need to extend `SpaceDescriptor` to 64-bit address spaces, we need to redesign
// the encoding.
const BASE_EXPONENT: usize = i32::BITS as usize - MANTISSA_BITS;

// For discontiguous descriptors: the space index occupies all bits above the
// type tag.
const INDEX_MASK: usize = !TYPE_MASK;
const INDEX_SHIFT: usize = TYPE_BITS;

// Next discontiguous space index to hand out.  Starting at the increment (not
// zero) keeps the first descriptor distinct from `UNINITIALIZED`, and each
// increment of `1 << TYPE_BITS` keeps the low type bits zero (`TYPE_SHARED`).
static DISCONTIGUOUS_SPACE_INDEX: AtomicUsize = AtomicUsize::new(DISCONTIG_INDEX_INCREMENT);
const DISCONTIG_INDEX_INCREMENT: usize = 1 << TYPE_BITS;
33
/// An opaque, copyable handle identifying a space, packed into a single
/// `usize` (see the bit-layout constants above).
#[derive(Copy, Clone, PartialEq, Debug)]
#[repr(transparent)]
pub struct SpaceDescriptor(usize);

// SAFETY: `SpaceDescriptor` is a `#[repr(transparent)]` wrapper around a
// `usize`, and the all-zero bit pattern is a valid value (it is
// `SpaceDescriptor::UNINITIALIZED`).
unsafe impl Zeroable for SpaceDescriptor {}
39
impl SpaceDescriptor {
    /// The all-zero descriptor, denoting "no space yet".  See [`Self::is_empty`].
    pub const UNINITIALIZED: Self = SpaceDescriptor(0);

    /// Create a descriptor for a contiguous space spanning `[start, end)`.
    ///
    /// When the layout forces contiguous spaces, the descriptor simply records
    /// the space index derived from `start`.  Otherwise the start address is
    /// encoded as `mantissa << (BASE_EXPONENT + exponent)` and the extent as a
    /// chunk count, so the whole range fits in one word.
    pub fn create_descriptor_from_heap_range(start: Address, end: Address) -> SpaceDescriptor {
        // A space that ends exactly at the heap end gets the "hi" type tag.
        let top = end == vm_layout().heap_end;
        if vm_layout().force_use_contiguous_spaces {
            let space_index = if start > vm_layout().heap_end {
                // Start beyond the heap: use a sentinel index.
                usize::MAX
            } else {
                start >> vm_layout().space_shift_64()
            };
            let flags = if top {
                TYPE_CONTIGUOUS_HI
            } else {
                TYPE_CONTIGUOUS
            };
            return SpaceDescriptor((space_index << INDEX_SHIFT) | flags);
        }
        let chunks = (end - start) >> vm_layout::LOG_BYTES_IN_CHUNK;
        // `start` must be non-null and the chunk count must fit the SIZE field.
        debug_assert!(!start.is_zero() && chunks > 0 && chunks < (1 << SIZE_BITS));
        // Factor `start >> BASE_EXPONENT` into mantissa * 2^exponent by
        // stripping trailing zero bits.
        let mut tmp = start >> BASE_EXPONENT;
        let mut exponent = 0;
        while (tmp != 0) && ((tmp & 1) == 0) {
            tmp >>= 1;
            exponent += 1;
        }
        let mantissa = tmp;
        // The encoding must round-trip to the original start address
        // (i.e. `start` loses no bits below `BASE_EXPONENT`).
        debug_assert!((tmp << (BASE_EXPONENT + exponent)) == start.as_usize());
        SpaceDescriptor(
            (mantissa << MANTISSA_SHIFT)
                | (exponent << EXPONENT_SHIFT)
                | (chunks << SIZE_SHIFT)
                | (if top {
                    TYPE_CONTIGUOUS_HI
                } else {
                    TYPE_CONTIGUOUS
                }),
        )
    }

    /// Create a descriptor for a discontiguous space.
    ///
    /// Each call hands out a fresh index from a global counter.  The increment
    /// (`1 << TYPE_BITS`) keeps the low type bits zero (`TYPE_SHARED`), so the
    /// result is never contiguous.
    pub fn create_descriptor() -> SpaceDescriptor {
        let next =
            DISCONTIGUOUS_SPACE_INDEX.fetch_add(DISCONTIG_INDEX_INCREMENT, Ordering::Relaxed);
        let ret = SpaceDescriptor(next);
        debug_assert!(!ret.is_contiguous());
        ret
    }

    /// True if this is the all-zero `UNINITIALIZED` descriptor.
    pub fn is_empty(self) -> bool {
        self.0 == SpaceDescriptor::UNINITIALIZED.0
    }

    /// True for contiguous spaces.  Both `TYPE_CONTIGUOUS` (1) and
    /// `TYPE_CONTIGUOUS_HI` (3) have the low bit set, so this matches both.
    pub fn is_contiguous(self) -> bool {
        (self.0 & TYPE_CONTIGUOUS) == TYPE_CONTIGUOUS
    }

    /// True for a contiguous space whose end is the heap end.
    pub fn is_contiguous_hi(self) -> bool {
        (self.0 & TYPE_MASK) == TYPE_CONTIGUOUS_HI
    }

    /// Start address of the (contiguous) space this descriptor encodes.
    pub fn get_start(self) -> Address {
        if !vm_layout().force_use_contiguous_spaces {
            // For 64-bit discontiguous space, use 32-bit start address
            self.get_start_32()
        } else {
            // Forced-contiguous layout: the index directly determines the start.
            unsafe { Address::from_usize(self.get_index() << vm_layout().log_space_extent) }
        }
    }

    /// Decode the mantissa/exponent fields back into the start address.
    fn get_start_32(self) -> Address {
        debug_assert!(self.is_contiguous());

        let descriptor = self.0;
        let mantissa = descriptor >> MANTISSA_SHIFT;
        let exponent = (descriptor & EXPONENT_MASK) >> EXPONENT_SHIFT;
        unsafe { Address::from_usize(mantissa << (BASE_EXPONENT + exponent)) }
    }

    /// Extent (in bytes) of the space this descriptor encodes.
    #[cfg(target_pointer_width = "64")]
    pub fn get_extent(self) -> usize {
        if !vm_layout().force_use_contiguous_spaces {
            // For 64-bit discontiguous space, use 32-bit extent
            self.get_extent_32()
        } else {
            // Forced-contiguous layout: every space has the same fixed extent.
            vm_layout().space_size_64()
        }
    }

    /// Extent (in bytes) of the space this descriptor encodes.
    #[cfg(target_pointer_width = "32")]
    pub fn get_extent(self) -> usize {
        self.get_extent_32()
    }

    /// Decode the SIZE field (a chunk count) into an extent in bytes.
    fn get_extent_32(self) -> usize {
        debug_assert!(self.is_contiguous());
        let chunks = (self.0 & SIZE_MASK) >> SIZE_SHIFT;
        chunks << vm_layout::LOG_BYTES_IN_CHUNK
    }

    /// The space index: all bits above the type tag.
    pub fn get_index(self) -> usize {
        (self.0 & INDEX_MASK) >> INDEX_SHIFT
    }
}
143
#[cfg(test)]
mod tests {
    use super::*;
    use crate::util::heap::layout::vm_layout::*;

    /// Space size used by the contiguous-descriptor tests: ten chunks.
    const TEST_SPACE_SIZE: usize = BYTES_IN_CHUNK * 10;

    /// Common checks for a freshly created contiguous descriptor.
    fn check_contiguous_flags(descriptor: SpaceDescriptor, expect_hi: bool) {
        assert!(!descriptor.is_empty());
        assert!(descriptor.is_contiguous());
        assert_eq!(descriptor.is_contiguous_hi(), expect_hi);
    }

    #[test]
    fn create_discontiguous_descriptor() {
        // Two consecutive discontiguous descriptors must both be non-empty
        // and non-contiguous.
        for descriptor in [
            SpaceDescriptor::create_descriptor(),
            SpaceDescriptor::create_descriptor(),
        ] {
            assert!(!descriptor.is_empty());
            assert!(!descriptor.is_contiguous());
            assert!(!descriptor.is_contiguous_hi());
        }
    }

    #[test]
    fn create_contiguous_descriptor_at_heap_start() {
        let layout = vm_layout();
        let descriptor = SpaceDescriptor::create_descriptor_from_heap_range(
            layout.heap_start,
            layout.heap_start + TEST_SPACE_SIZE,
        );
        check_contiguous_flags(descriptor, false);
        assert_eq!(descriptor.get_start(), layout.heap_start);
        if cfg!(target_pointer_width = "64") {
            // Forced-contiguous 64-bit layout reports the fixed space size.
            assert_eq!(descriptor.get_extent(), layout.space_size_64());
        } else {
            assert_eq!(descriptor.get_extent(), TEST_SPACE_SIZE);
        }
    }

    #[test]
    fn create_contiguous_descriptor_in_heap() {
        let layout = vm_layout();
        let descriptor = SpaceDescriptor::create_descriptor_from_heap_range(
            layout.heap_start + TEST_SPACE_SIZE,
            layout.heap_start + TEST_SPACE_SIZE * 2,
        );
        check_contiguous_flags(descriptor, false);
        if cfg!(target_pointer_width = "64") {
            // On 64-bit, both ranges fall inside the same fixed-extent space.
            assert_eq!(descriptor.get_start(), layout.heap_start);
            assert_eq!(descriptor.get_extent(), layout.space_size_64());
        } else {
            assert_eq!(descriptor.get_start(), layout.heap_start + TEST_SPACE_SIZE);
            assert_eq!(descriptor.get_extent(), TEST_SPACE_SIZE);
        }
    }

    #[test]
    fn create_contiguous_descriptor_at_heap_end() {
        let layout = vm_layout();
        let descriptor = SpaceDescriptor::create_descriptor_from_heap_range(
            layout.heap_end - TEST_SPACE_SIZE,
            layout.heap_end,
        );
        // A space ending at the heap end must carry the "hi" tag.
        check_contiguous_flags(descriptor, true);
        if cfg!(target_pointer_width = "64") {
            assert_eq!(
                descriptor.get_start(),
                layout.heap_end - layout.space_size_64()
            );
            assert_eq!(descriptor.get_extent(), layout.space_size_64());
        } else {
            assert_eq!(descriptor.get_start(), layout.heap_end - TEST_SPACE_SIZE);
            assert_eq!(descriptor.get_extent(), TEST_SPACE_SIZE);
        }
    }
}