mmtk/util/heap/layout/vm_layout.rs

use std::ptr::addr_of;
use std::sync::atomic::AtomicBool;

use atomic::Ordering;

use super::heap_parameters::*;
use crate::util::constants::*;
use crate::util::Address;

use crate::util::conversions::{chunk_align_down, chunk_align_up};

/// log2 of the number of bytes in a chunk.
pub const LOG_BYTES_IN_CHUNK: usize = 22;
/// Number of bytes in a chunk.
pub const BYTES_IN_CHUNK: usize = 1 << LOG_BYTES_IN_CHUNK;
/// Mask selecting the within-chunk bits of an address.
pub const CHUNK_MASK: usize = (1 << LOG_BYTES_IN_CHUNK) - 1;
/// Number of pages in a chunk.
pub const PAGES_IN_CHUNK: usize = 1 << (LOG_BYTES_IN_CHUNK - LOG_BYTES_IN_PAGE as usize);
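// For concreteness: LOG_BYTES_IN_CHUNK == 22 makes a chunk 4 MiB, and with the usual
// 4 KiB page (LOG_BYTES_IN_PAGE == 12 in `crate::util::constants`, assumed here rather
// than shown in this file) PAGES_IN_CHUNK works out to 1 << 10 = 1024.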

/// A runtime-configurable description of the virtual memory layout used by MMTk.
#[derive(Clone, Debug)]
pub struct VMLayout {
    /// log2 of the virtual address space available to MMTk.
    pub log_address_space: usize,
    /// Lowest virtual address MMTk may manage. Must be chunk-aligned.
    pub heap_start: Address,
    /// End (exclusive) of the virtual address range MMTk may manage. Must be chunk-aligned.
    pub heap_end: Address,
    /// log2 of an upper bound on the extent of any single space.
    pub log_space_extent: usize,
    /// Whether each space owns a contiguous slice of virtual memory, as in the normal
    /// 64-bit layout. False for the 32-bit layout, where spaces share a discontiguous range.
    pub force_use_contiguous_spaces: bool,
}

impl VMLayout {
    /// log2 of the usable virtual address space on this architecture.
    #[cfg(target_pointer_width = "32")]
    pub const LOG_ARCH_ADDRESS_SPACE: usize = 32;
    /// log2 of the usable virtual address space on this architecture.
    #[cfg(target_pointer_width = "64")]
    pub const LOG_ARCH_ADDRESS_SPACE: usize = 47;

    /// An upper bound on the extent of any space in this layout.
    pub const fn max_space_extent(&self) -> usize {
        1 << self.log_space_extent
    }
    /// Lowest virtual address available to MMTk.
    pub const fn available_start(&self) -> Address {
        self.heap_start
    }
    /// End (exclusive) of the virtual address range available to MMTk.
    pub const fn available_end(&self) -> Address {
        self.heap_end
    }
    /// Size in bytes of the virtual address range available to MMTk.
    pub const fn available_bytes(&self) -> usize {
        self.available_end().get_extent(self.available_start())
    }
    /// Maximum number of chunks the layout may need to track.
    pub const fn max_chunks(&self) -> usize {
        1 << self.log_max_chunks()
    }
    /// log2 of the maximum number of chunks the layout may need to track.
    pub const fn log_max_chunks(&self) -> usize {
        Self::LOG_ARCH_ADDRESS_SPACE - LOG_BYTES_IN_CHUNK
    }
    /// Number of bits to shift a space index into or out of an address
    /// in the contiguous 64-bit layout.
    pub(crate) fn space_shift_64(&self) -> usize {
        self.log_space_extent
    }
    /// Bit mask isolating the space-index bits of an address in the
    /// contiguous 64-bit layout.
    pub(crate) fn space_mask_64(&self) -> usize {
        ((1 << LOG_MAX_SPACES) - 1) << self.space_shift_64()
    }
    /// Size of each space in the contiguous 64-bit layout; every space
    /// gets the same, maximal extent.
    pub(crate) fn space_size_64(&self) -> usize {
        self.max_space_extent()
    }
    /// log2 of the number of pages in a space in the contiguous 64-bit layout.
    pub(crate) fn log_pages_in_space64(&self) -> usize {
        self.log_space_extent - LOG_BYTES_IN_PAGE as usize
    }
    /// Number of pages in a space in the contiguous 64-bit layout.
    pub(crate) fn pages_in_space64(&self) -> usize {
        1 << self.log_pages_in_space64()
    }
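    // For concreteness, with the default 64-bit layout (log_space_extent == 41) and the
    // usual 4 KiB page (LOG_BYTES_IN_PAGE == 12, assumed here): space_size_64() is 2 TiB,
    // log_pages_in_space64() is 41 - 12 = 29, and pages_in_space64() is 1 << 29.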

    /// Mask that extracts the space-index bits of an address for indexing the space map;
    /// an address masked with it is the base address of the space it belongs to.
    pub(crate) fn address_mask(&self) -> usize {
        0x1f << self.log_space_extent
    }

    /// Check that the layout is internally consistent: chunk-aligned bounds, heap_start
    /// below heap_end, and extents that fit the address space. Panics if a constraint
    /// is violated.
    const fn validate(&self) {
        assert!(self.heap_start.is_aligned_to(BYTES_IN_CHUNK));
        assert!(self.heap_end.is_aligned_to(BYTES_IN_CHUNK));
        assert!(self.heap_start.as_usize() < self.heap_end.as_usize());
        assert!(self.log_address_space <= Self::LOG_ARCH_ADDRESS_SPACE);
        assert!(self.log_space_extent <= self.log_address_space);
        if self.force_use_contiguous_spaces {
            assert!(self.log_space_extent <= (self.log_address_space - LOG_MAX_SPACES));
            assert!(self.heap_start.is_aligned_to(self.max_space_extent()));
        }
    }

    /// Assert that the configured address space fits within what the mmapper can map.
    pub(crate) fn validate_address_space(&self) {
        let log_mappable_bytes = crate::mmtk::MMAPPER.log_mappable_bytes();
        assert!(
            self.log_address_space <= log_mappable_bytes as usize,
            "log_address_space is {log_address_space}, but \
            the MMAPPER can only handle up to {log_mappable_bytes} bits",
            log_address_space = self.log_address_space,
        );
    }
}

impl VMLayout {
    /// Normal 32-bit layout: a discontiguous heap between 0x8000_0000 and 0xd000_0000.
    pub const fn new_32bit() -> Self {
        let layout32 = Self {
            log_address_space: 32,
            heap_start: chunk_align_down(unsafe { Address::from_usize(0x8000_0000) }),
            heap_end: chunk_align_up(unsafe { Address::from_usize(0xd000_0000) }),
            log_space_extent: 31,
            force_use_contiguous_spaces: false,
        };
        layout32.validate();
        layout32
    }
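    // For concreteness: 0x8000_0000..0xd000_0000 spans 0x5000_0000 bytes (1.25 GiB), and
    // both bounds are already 4 MiB-aligned, so the chunk_align_* calls are no-ops here.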
    /// Normal 64-bit layout: a contiguous-space heap starting at 0x0000_0200_0000_0000,
    /// with each space given a 2^41-byte slice of the address range.
    #[cfg(target_pointer_width = "64")]
    pub const fn new_64bit() -> Self {
        let layout64 = Self {
            log_address_space: 47,
            heap_start: chunk_align_down(unsafe {
                Address::from_usize(0x0000_0200_0000_0000usize)
            }),
            heap_end: chunk_align_up(unsafe { Address::from_usize(0x0000_2200_0000_0000usize) }),
            log_space_extent: 41,
            force_use_contiguous_spaces: true,
        };
        layout64.validate();
        layout64
    }
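    // For concreteness: this layout spans 0x0000_0200_0000_0000..0x0000_2200_0000_0000,
    // i.e. 2^45 bytes (32 TiB), and each space gets a contiguous 2^41-byte (2 TiB) slice.
    // Assuming LOG_MAX_SPACES == 4 (its value in heap_parameters, not shown in this file),
    // the contiguous-space constraint in `validate` becomes 41 <= 47 - 4, which holds.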

    /// Install a custom virtual memory layout. This must be done before the layout is
    /// first read via `vm_layout()`, i.e. before MMTk starts using it.
    pub(crate) fn set_custom_vm_layout(constants: VMLayout) {
        if cfg!(debug_assertions) {
            assert!(
                !VM_LAYOUT_FETCHED.load(Ordering::SeqCst),
                "vm_layout has already been used before setup"
            );
        }
        constants.validate();
        unsafe {
            VM_LAYOUT = constants;
        }
    }
}

impl std::default::Default for VMLayout {
    #[cfg(target_pointer_width = "32")]
    fn default() -> Self {
        Self::new_32bit()
    }

    #[cfg(target_pointer_width = "64")]
    fn default() -> Self {
        Self::new_64bit()
    }
}

// The global layout, defaulting to the standard layout for the target word size.
// It may be replaced once, via `set_custom_vm_layout`, before it is first read.
#[cfg(target_pointer_width = "32")]
static mut VM_LAYOUT: VMLayout = VMLayout::new_32bit();
#[cfg(target_pointer_width = "64")]
static mut VM_LAYOUT: VMLayout = VMLayout::new_64bit();

/// Set (in debug builds) once the layout has been read, so that a late
/// `set_custom_vm_layout` call can be caught.
static VM_LAYOUT_FETCHED: AtomicBool = AtomicBool::new(false);

/// Get the virtual memory layout in use. A custom layout must be installed via
/// `set_custom_vm_layout` before the first call to this function.
pub fn vm_layout() -> &'static VMLayout {
    if cfg!(debug_assertions) {
        VM_LAYOUT_FETCHED.store(true, Ordering::SeqCst);
    }
    unsafe { &*addr_of!(VM_LAYOUT) }
}
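
// A minimal crate-internal usage sketch (illustrative only; the value chosen for
// `log_space_extent` below is hypothetical, not taken from this file): build a layout,
// install it before anything reads the layout, then access it through `vm_layout()`.
//
//     let mut custom = VMLayout::new_64bit();
//     custom.log_space_extent = 40; // 2^40-byte spaces; still passes validate()
//     VMLayout::set_custom_vm_layout(custom); // must precede the first vm_layout() call
//     assert_eq!(vm_layout().log_space_extent, 40);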