mmtk/util/heap/layout/
vm_layout.rs

//! This module defines virtual memory layout parameters.
2
3use std::ptr::addr_of;
4use std::sync::atomic::AtomicBool;
5
6use atomic::Ordering;
7
8use super::heap_parameters::*;
9use crate::util::constants::*;
10use crate::util::Address;
11
12use crate::util::conversions::{chunk_align_down, chunk_align_up};
13
14/// log_2 of the coarsest unit of address space allocation.
15pub const LOG_BYTES_IN_CHUNK: usize = 22;
16/// Coarsest unit of address space allocation.
17pub const BYTES_IN_CHUNK: usize = 1 << LOG_BYTES_IN_CHUNK;
18/// Mask for chunk size.
19pub const CHUNK_MASK: usize = (1 << LOG_BYTES_IN_CHUNK) - 1;
20/// Coarsest unit of address space allocation, in pages
21pub const PAGES_IN_CHUNK: usize = 1 << (LOG_BYTES_IN_CHUNK - LOG_BYTES_IN_PAGE as usize);
22
/// Runtime-initialized virtual memory layout constants.
///
/// Construct via [`VMLayout::new_32bit`], [`VMLayout::new_64bit`], or
/// `Default`, or build a custom layout for bindings with special needs
/// (e.g. compressed pointers).
#[derive(Clone, Debug)]
pub struct VMLayout {
    /// log_2 of the addressable heap virtual space.
    pub log_address_space: usize,
    /// Lowest virtual address used by the virtual machine. Should be chunk aligned.
    pub heap_start: Address,
    /// Highest virtual address used by the virtual machine. Should be chunk aligned.
    pub heap_end: Address,
    /// log_2 of an upper bound on the extent (in bytes) of any space in the
    /// current memory layout.
    pub log_space_extent: usize,
    /// Should mmtk enable contiguous spaces and virtual memory for all spaces?
    /// For normal 64-bit config, this should be set to true. Each space should own a contiguous piece of virtual memory.
    /// For 32-bit or 64-bit compressed heap, we don't have enough virtual memory, so this should be set to false.
    pub force_use_contiguous_spaces: bool,
}
40
impl VMLayout {
    #[cfg(target_pointer_width = "32")]
    /// The maximum virtual memory address space that can be used on the target.
    pub const LOG_ARCH_ADDRESS_SPACE: usize = 32;
    #[cfg(target_pointer_width = "64")]
    /// The maximum virtual memory address space that can be used on the target.
    /// 47 bits matches the usable user-space range on common 64-bit platforms.
    pub const LOG_ARCH_ADDRESS_SPACE: usize = 47;

    /// An upper bound, in bytes, on the extent of any space in the
    /// current memory layout (`1 << log_space_extent`).
    pub const fn max_space_extent(&self) -> usize {
        1 << self.log_space_extent
    }
    /// Lowest virtual address available for MMTk to manage.
    pub const fn available_start(&self) -> Address {
        self.heap_start
    }
    /// Highest virtual address available for MMTk to manage.
    pub const fn available_end(&self) -> Address {
        self.heap_end
    }
    /// Size of the address space available to the MMTk heap
    /// (the distance from `available_start` to `available_end`).
    pub const fn available_bytes(&self) -> usize {
        self.available_end().get_extent(self.available_start())
    }
    /// Maximum number of chunks we need to track.  Only used in 32-bit layout.
    pub const fn max_chunks(&self) -> usize {
        1 << self.log_max_chunks()
    }
    /// log_2 of the maximum number of chunks we need to track.  Only used in 32-bit layout.
    /// Note: derived from the architectural limit, not from `self.log_address_space`.
    pub const fn log_max_chunks(&self) -> usize {
        Self::LOG_ARCH_ADDRESS_SPACE - LOG_BYTES_IN_CHUNK
    }
    /// Number of bits to shift a space index into/out of a virtual address.
    /// In a 32-bit model, use a dummy value so that the compiler doesn't barf.
    pub(crate) fn space_shift_64(&self) -> usize {
        self.log_space_extent
    }
    /// Bitwise mask to isolate a space index in a virtual address.
    /// Only meaningful for the contiguous 64-bit layout (the name reflects this);
    /// in a 32-bit model the value is a dummy.
    pub(crate) fn space_mask_64(&self) -> usize {
        ((1 << LOG_MAX_SPACES) - 1) << self.space_shift_64()
    }
    /// Size of each space in the 64-bit memory layout.
    /// FIXME: When Compiling for 32 bits this expression makes no sense
    pub(crate) fn space_size_64(&self) -> usize {
        self.max_space_extent()
    }
    /// log_2 of the number of pages in a 64-bit space
    pub(crate) fn log_pages_in_space64(&self) -> usize {
        self.log_space_extent - LOG_BYTES_IN_PAGE as usize
    }
    /// The number of pages in a 64-bit space
    pub(crate) fn pages_in_space64(&self) -> usize {
        1 << self.log_pages_in_space64()
    }

    /// This mask extracts a few bits from address, and use it as index to the space map table.
    /// 0x1f covers 5 bits, i.e. 32 possible index values: masking an arbitrary address
    /// yields 0 to 31; valid space-start addresses are expected to map to 1 to 16.
    pub(crate) fn address_mask(&self) -> usize {
        0x1f << self.log_space_extent
    }

    /// Check the internal consistency of this layout. Panics on violation;
    /// when evaluated in a const context (e.g. static initialization), a
    /// violation becomes a compile-time error.
    const fn validate(&self) {
        assert!(self.heap_start.is_aligned_to(BYTES_IN_CHUNK));
        assert!(self.heap_end.is_aligned_to(BYTES_IN_CHUNK));
        assert!(self.heap_start.as_usize() < self.heap_end.as_usize());
        assert!(self.log_address_space <= Self::LOG_ARCH_ADDRESS_SPACE);
        assert!(self.log_space_extent <= self.log_address_space);
        if self.force_use_contiguous_spaces {
            // Contiguous layout: all MAX_SPACES spaces must fit side by side in
            // the address space, and the heap must start on a space boundary.
            assert!(self.log_space_extent <= (self.log_address_space - LOG_MAX_SPACES));
            assert!(self.heap_start.is_aligned_to(self.max_space_extent()));
        }
    }

    /// Assert that the configured address space does not exceed what the
    /// active mmapper implementation can map.
    pub(crate) fn validate_address_space(&self) {
        let log_mappable_bytes = crate::mmtk::MMAPPER.log_mappable_bytes();
        assert!(
            self.log_address_space <= log_mappable_bytes as usize,
            "log_address_space is {log_address_space}, but \
            the MMAPPER can only handle up to {log_mappable_bytes} bits",
            log_address_space = self.log_address_space,
        );
    }
}
129
130impl VMLayout {
131    /// Normal 32-bit configuration
132    pub const fn new_32bit() -> Self {
133        let layout32 = Self {
134            log_address_space: 32,
135            heap_start: chunk_align_down(unsafe { Address::from_usize(0x8000_0000) }),
136            heap_end: chunk_align_up(unsafe { Address::from_usize(0xd000_0000) }),
137            log_space_extent: 31,
138            force_use_contiguous_spaces: false,
139        };
140        layout32.validate();
141        layout32
142    }
143    /// Normal 64-bit configuration
144    #[cfg(target_pointer_width = "64")]
145    pub const fn new_64bit() -> Self {
146        let layout64 = Self {
147            log_address_space: 47,
148            heap_start: chunk_align_down(unsafe {
149                Address::from_usize(0x0000_0200_0000_0000usize)
150            }),
151            heap_end: chunk_align_up(unsafe { Address::from_usize(0x0000_2200_0000_0000usize) }),
152            log_space_extent: 41,
153            force_use_contiguous_spaces: true,
154        };
155        layout64.validate();
156        layout64
157    }
158
159    /// Custom VM layout constants. VM bindings may use this function for compressed or 39-bit heap support.
160    /// This function must be called before MMTk::new()
161    pub(crate) fn set_custom_vm_layout(constants: VMLayout) {
162        if cfg!(debug_assertions) {
163            assert!(
164                !VM_LAYOUT_FETCHED.load(Ordering::SeqCst),
165                "vm_layout is already been used before setup"
166            );
167        }
168        constants.validate();
169        unsafe {
170            VM_LAYOUT = constants;
171        }
172    }
173}
174
175// Implement default so bindings can selectively change some parameters while using default for others.
176impl std::default::Default for VMLayout {
177    #[cfg(target_pointer_width = "32")]
178    fn default() -> Self {
179        Self::new_32bit()
180    }
181
182    #[cfg(target_pointer_width = "64")]
183    fn default() -> Self {
184        Self::new_64bit()
185    }
186}
187
// The layout currently in effect. Initialized to the target's default and
// overwritten at most once — before first use — by
// `VMLayout::set_custom_vm_layout`; read via `vm_layout()`.
#[cfg(target_pointer_width = "32")]
static mut VM_LAYOUT: VMLayout = VMLayout::new_32bit();
#[cfg(target_pointer_width = "64")]
static mut VM_LAYOUT: VMLayout = VMLayout::new_64bit();

// Debug-build flag: set when `vm_layout()` is first called, and checked by
// `set_custom_vm_layout` to catch a set-after-use misuse.
static VM_LAYOUT_FETCHED: AtomicBool = AtomicBool::new(false);
194
195/// Get the current virtual memory layout in use.
196/// If the binding would like to set a custom virtual memory layout ([`crate::mmtk::MMTKBuilder::set_vm_layout`]), they should not
197/// call this function before they set a custom layout.
198pub fn vm_layout() -> &'static VMLayout {
199    if cfg!(debug_assertions) {
200        VM_LAYOUT_FETCHED.store(true, Ordering::SeqCst);
201    }
202    unsafe { &*addr_of!(VM_LAYOUT) }
203}