// mmtk/policy/marksweepspace/native_ms/block.rs
use atomic::Ordering;

use super::BlockList;
use super::MarkSweepSpace;
use crate::util::constants::LOG_BYTES_IN_PAGE;
use crate::util::heap::chunk_map::*;
use crate::util::linear_scan::Region;
use crate::util::object_enum::BlockMayHaveObjects;
use crate::vm::ObjectModel;
use crate::{
    util::{
        metadata::side_metadata::SideMetadataSpec, Address, ObjectReference, OpaquePointer,
        VMThread,
    },
    vm::VMBinding,
};

use std::num::NonZeroUsize;

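/// A block in the native mark-sweep space: an aligned region holding cells of a single size
/// class, represented by its start address. Using `NonZeroUsize` lets `Option<Block>` encode
/// "no block" as the zero address (see `load_prev_block` / `load_next_block`).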
#[derive(Clone, Copy, PartialOrd, PartialEq)]
#[repr(transparent)]
pub struct Block(NonZeroUsize);

impl std::fmt::Debug for Block {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        write!(f, "Block(0x{:x})", self.0)
    }
}

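// Each block is `1 << 16` bytes (64 KB); `Region` derives `Self::BYTES` and `end()` from
// `LOG_BYTES`.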
impl Region for Block {
    const LOG_BYTES: usize = 16;

    fn from_aligned_address(address: Address) -> Self {
        debug_assert!(address.is_aligned_to(Self::BYTES));
        debug_assert!(!address.is_zero());
        Self(unsafe { NonZeroUsize::new_unchecked(address.as_usize()) })
    }

    fn start(&self) -> Address {
        unsafe { Address::from_usize(self.0.get()) }
    }
}

impl BlockMayHaveObjects for Block {
    fn may_have_objects(&self) -> bool {
        self.get_state() != BlockState::Unallocated
    }
}

impl Block {
    pub const LOG_PAGES: usize = Self::LOG_BYTES - LOG_BYTES_IN_PAGE as usize;

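    /// The side metadata specs used for each block. The mark table also encodes the block state
    /// (see [`BlockState`]); the remaining tables hold the free-list head, cell size, owning
    /// `BlockList` and owning thread, plus the next/previous links of the block list. The
    /// `malloc_native_mimalloc`-only free list specs below are not part of this array.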
    pub const METADATA_SPECS: [SideMetadataSpec; 7] = [
        Self::MARK_TABLE,
        Self::NEXT_BLOCK_TABLE,
        Self::PREV_BLOCK_TABLE,
        Self::FREE_LIST_TABLE,
        Self::SIZE_TABLE,
        Self::BLOCK_LIST_TABLE,
        Self::TLS_TABLE,
    ];

    pub const MARK_TABLE: SideMetadataSpec =
        crate::util::metadata::side_metadata::spec_defs::MS_BLOCK_MARK;

    pub const NEXT_BLOCK_TABLE: SideMetadataSpec =
        crate::util::metadata::side_metadata::spec_defs::MS_BLOCK_NEXT;

    pub const PREV_BLOCK_TABLE: SideMetadataSpec =
        crate::util::metadata::side_metadata::spec_defs::MS_BLOCK_PREV;

    pub const FREE_LIST_TABLE: SideMetadataSpec =
        crate::util::metadata::side_metadata::spec_defs::MS_FREE;

    #[cfg(feature = "malloc_native_mimalloc")]
    pub const LOCAL_FREE_LIST_TABLE: SideMetadataSpec =
        crate::util::metadata::side_metadata::spec_defs::MS_LOCAL_FREE;

    #[cfg(feature = "malloc_native_mimalloc")]
    pub const THREAD_FREE_LIST_TABLE: SideMetadataSpec =
        crate::util::metadata::side_metadata::spec_defs::MS_THREAD_FREE;

    pub const SIZE_TABLE: SideMetadataSpec =
        crate::util::metadata::side_metadata::spec_defs::MS_BLOCK_SIZE;

    pub const BLOCK_LIST_TABLE: SideMetadataSpec =
        crate::util::metadata::side_metadata::spec_defs::MS_BLOCK_LIST;

    pub const TLS_TABLE: SideMetadataSpec =
        crate::util::metadata::side_metadata::spec_defs::MS_BLOCK_TLS;

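    /// Load the head of the block's free cell list. The list is threaded through the free cells
    /// themselves: each free cell's first word holds the address of the next free cell, and a
    /// zero head means the block has no free cells (see `Self::has_free_cells` and the sweep
    /// methods).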
    pub fn load_free_list(&self) -> Address {
        unsafe { Address::from_usize(Block::FREE_LIST_TABLE.load::<usize>(self.start())) }
    }

    pub fn store_free_list(&self, free_list: Address) {
        unsafe { Block::FREE_LIST_TABLE.store::<usize>(self.start(), free_list.as_usize()) }
    }

    #[cfg(feature = "malloc_native_mimalloc")]
    pub fn load_local_free_list(&self) -> Address {
        unsafe { Address::from_usize(Block::LOCAL_FREE_LIST_TABLE.load::<usize>(self.start())) }
    }

    #[cfg(feature = "malloc_native_mimalloc")]
    pub fn store_local_free_list(&self, local_free: Address) {
        unsafe { Block::LOCAL_FREE_LIST_TABLE.store::<usize>(self.start(), local_free.as_usize()) }
    }

    #[cfg(feature = "malloc_native_mimalloc")]
    pub fn load_thread_free_list(&self) -> Address {
        unsafe {
            Address::from_usize(
                Block::THREAD_FREE_LIST_TABLE.load_atomic::<usize>(self.start(), Ordering::SeqCst),
            )
        }
    }

    #[cfg(feature = "malloc_native_mimalloc")]
    pub fn store_thread_free_list(&self, thread_free: Address) {
        unsafe {
            Block::THREAD_FREE_LIST_TABLE.store::<usize>(self.start(), thread_free.as_usize())
        }
    }

    #[cfg(feature = "malloc_native_mimalloc")]
    pub fn cas_thread_free_list(&self, old_thread_free: Address, new_thread_free: Address) -> bool {
        Block::THREAD_FREE_LIST_TABLE
            .compare_exchange_atomic::<usize>(
                self.start(),
                old_thread_free.as_usize(),
                new_thread_free.as_usize(),
                Ordering::SeqCst,
                Ordering::SeqCst,
            )
            .is_ok()
    }

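    /// The previous block in the `BlockList` this block belongs to, or `None` if there is none.
    /// List links are stored as raw block start addresses in side metadata, with zero meaning
    /// "no neighbour" (hence the `NonZeroUsize` inside `Block`).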
    pub fn load_prev_block(&self) -> Option<Block> {
        let prev = unsafe { Block::PREV_BLOCK_TABLE.load::<usize>(self.start()) };
        NonZeroUsize::new(prev).map(Block)
    }

    pub fn load_next_block(&self) -> Option<Block> {
        let next = unsafe { Block::NEXT_BLOCK_TABLE.load::<usize>(self.start()) };
        NonZeroUsize::new(next).map(Block)
    }

    pub fn store_next_block(&self, next: Block) {
        unsafe {
            Block::NEXT_BLOCK_TABLE.store::<usize>(self.start(), next.start().as_usize());
        }
    }

    pub fn clear_next_block(&self) {
        unsafe {
            Block::NEXT_BLOCK_TABLE.store::<usize>(self.start(), 0);
        }
    }

    pub fn store_prev_block(&self, prev: Block) {
        unsafe {
            Block::PREV_BLOCK_TABLE.store::<usize>(self.start(), prev.start().as_usize());
        }
    }

    pub fn clear_prev_block(&self) {
        unsafe {
            Block::PREV_BLOCK_TABLE.store::<usize>(self.start(), 0);
        }
    }

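    /// Record the `BlockList` that currently owns this block as a raw pointer, so the block can
    /// later be unlinked from that list (see `Self::attempt_release`).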
    pub fn store_block_list(&self, block_list: &BlockList) {
        let block_list_usize: usize = block_list as *const BlockList as usize;
        unsafe {
            Block::BLOCK_LIST_TABLE.store::<usize>(self.start(), block_list_usize);
        }
    }

    pub fn load_block_list(&self) -> *mut BlockList {
        let block_list =
            Block::BLOCK_LIST_TABLE.load_atomic::<usize>(self.start(), Ordering::SeqCst);
        block_list as *mut BlockList
    }

    pub fn load_block_cell_size(&self) -> usize {
        Block::SIZE_TABLE.load_atomic::<usize>(self.start(), Ordering::SeqCst)
    }

    pub fn store_block_cell_size(&self, size: usize) {
        debug_assert_ne!(size, 0);
        unsafe { Block::SIZE_TABLE.store::<usize>(self.start(), size) }
    }

    pub fn store_tls(&self, tls: VMThread) {
        let tls_usize: usize = tls.0.to_address().as_usize();
        unsafe { Block::TLS_TABLE.store(self.start(), tls_usize) }
    }

    pub fn load_tls(&self) -> VMThread {
        let tls = Block::TLS_TABLE.load_atomic::<usize>(self.start(), Ordering::SeqCst);
        VMThread(OpaquePointer::from_address(unsafe {
            Address::from_usize(tls)
        }))
    }

    pub fn has_free_cells(&self) -> bool {
        !self.load_free_list().is_zero()
    }

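    /// Get the block state. The state is encoded in the per-block mark byte; see [`BlockState`]
    /// for the mapping between states and raw byte values.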
    pub fn get_state(&self) -> BlockState {
        let byte = Self::MARK_TABLE.load_atomic::<u8>(self.start(), Ordering::SeqCst);
        byte.into()
    }

    pub fn set_state(&self, state: BlockState) {
        let state = u8::from(state);
        Self::MARK_TABLE.store_atomic::<u8>(self.start(), state, Ordering::SeqCst);
    }

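    /// Release the block if it was not marked: unlink it from its block list and return it to
    /// the space. Returns `true` if the block was released. Marked blocks are kept, and an
    /// `Unallocated` block should never reach this method.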
    pub fn attempt_release<VM: VMBinding>(self, space: &MarkSweepSpace<VM>) -> bool {
        match self.get_state() {
            BlockState::Unallocated => unreachable!(),
            BlockState::Unmarked => {
                let block_list = self.load_block_list();
                unsafe { &mut *block_list }.remove(self);
                space.release_block(self);
                true
            }
            BlockState::Marked => false,
        }
    }

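    /// Sweep the block, rebuilding its free list from cells that no longer hold a marked object.
    /// Depending on what the VM guarantees about object reference addresses, this dispatches to
    /// either the fast `simple_sweep` or the conservative `naive_brute_force_sweep`.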
    pub fn sweep<VM: VMBinding>(&self) {
        if cfg!(feature = "malloc_native_mimalloc") {
            // Sweeping is not supported with the `malloc_native_mimalloc` feature.
            unimplemented!()
        }

        // Use the fast path only when a cell address is guaranteed to be the object
        // reference address.
        if !VM::USE_ALLOCATION_OFFSET
            && VM::MAX_ALIGNMENT == VM::MIN_ALIGNMENT
            && crate::util::conversions::raw_is_aligned(
                self.load_block_cell_size(),
                VM::MAX_ALIGNMENT,
            )
            && VM::VMObjectModel::UNIFIED_OBJECT_REFERENCE_ADDRESS
        {
            self.simple_sweep::<VM>()
        } else {
            self.naive_brute_force_sweep::<VM>()
        }
    }

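    /// Fast-path sweep, valid when every cell address is also a valid object reference address:
    /// each unmarked cell is pushed onto the free list, and its VO bit is cleared when the
    /// `vo_bit` feature is enabled.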
    fn simple_sweep<VM: VMBinding>(&self) {
        let cell_size = self.load_block_cell_size();
        debug_assert_ne!(cell_size, 0);
        let mut cell = self.start();
        let mut last = unsafe { Address::zero() };
        while cell + cell_size <= self.start() + Block::BYTES {
            let potential_object = unsafe { ObjectReference::from_raw_address_unchecked(cell) };

            if !VM::VMObjectModel::LOCAL_MARK_BIT_SPEC
                .is_marked::<VM>(potential_object, Ordering::SeqCst)
            {
                #[cfg(feature = "vo_bit")]
                crate::util::metadata::vo_bit::unset_vo_bit_nocheck(potential_object);
                // Push the cell onto the free list: store the previous head in the cell's
                // first word.
                unsafe {
                    cell.store::<Address>(last);
                }
                last = cell;
            }
            cell += cell_size;
        }

        self.store_free_list(last);
    }

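    /// Conservative sweep: the object reference may live anywhere inside a cell, so candidate
    /// addresses are probed every `MIN_OBJECT_SIZE` bytes. A cell is added to the free list only
    /// after the whole cell has been scanned without finding a marked object.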
    fn naive_brute_force_sweep<VM: VMBinding>(&self) {
        use crate::util::constants::MIN_OBJECT_SIZE;

        let cell_size = self.load_block_cell_size();
        let mut cell = self.start();
        let mut last = Address::ZERO;
        let mut cursor = cell;

        debug!("Sweep block {:?}, cell size {}", self, cell_size);

        while cell + cell_size <= self.end() {
            let potential_object_ref = unsafe {
                ObjectReference::from_raw_address_unchecked(
                    cursor + VM::VMObjectModel::OBJECT_REF_OFFSET_LOWER_BOUND,
                )
            };
            trace!(
                "{:?}: cell = {}, last cell in free list = {}, cursor = {}, potential object = {}",
                self,
                cell,
                last,
                cursor,
                potential_object_ref
            );

            if VM::VMObjectModel::LOCAL_MARK_BIT_SPEC
                .is_marked::<VM>(potential_object_ref, Ordering::SeqCst)
            {
                debug!("{:?} Live cell: {}", self, cell);
                // The cell is live. Move on to the next cell.
                cell += cell_size;
                cursor = cell;
            } else {
                // No marked object at this cursor; keep probing within the cell.
                cursor += MIN_OBJECT_SIZE;

                if cursor >= cell + cell_size {
                    // The whole cell was scanned without finding a marked object: it is free.
                    debug!(
                        "{:?} Free cell: {}, last cell in freelist is {}",
                        self, cell, last
                    );

                    #[cfg(feature = "vo_bit")]
                    crate::util::metadata::vo_bit::bzero_vo_bit(cell, cell_size);

                    debug_assert!(last.is_zero() || (last >= self.start() && last < self.end()));
                    unsafe {
                        cell.store::<Address>(last);
                    }
                    last = cell;
                    cell += cell_size;
                    debug_assert_eq!(cursor, cell);
                }
            }
        }

        self.store_free_list(last);
    }

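    /// The chunk containing this block.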
    pub fn chunk(&self) -> Chunk {
        Chunk::from_unaligned_address(self.start())
    }

    /// Initialize the block: it starts out as `Unmarked`.
    pub fn init(&self) {
        self.set_state(BlockState::Unmarked);
    }

    /// Reset the block to `Unallocated`.
    pub fn deinit(&self) {
        self.set_state(BlockState::Unallocated);
    }
}

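/// The state of a block, stored in the per-block mark byte (`Block::MARK_TABLE`). The raw byte
/// encodings are given by the constants in `impl BlockState`.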
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum BlockState {
    /// The block is not allocated.
    Unallocated,
    /// The block is allocated but has not been marked in the current GC.
    Unmarked,
    /// The block is allocated and marked.
    Marked,
}

impl BlockState {
    /// Raw mark-byte value for an unallocated block.
    const MARK_UNALLOCATED: u8 = 0;
    /// Raw mark-byte value for an unmarked block.
    const MARK_UNMARKED: u8 = u8::MAX;
    /// Raw mark-byte value for a marked block.
    const MARK_MARKED: u8 = u8::MAX - 1;
}

impl From<u8> for BlockState {
    fn from(state: u8) -> Self {
        match state {
            Self::MARK_UNALLOCATED => BlockState::Unallocated,
            Self::MARK_UNMARKED => BlockState::Unmarked,
            Self::MARK_MARKED => BlockState::Marked,
            _ => unreachable!(),
        }
    }
}

impl From<BlockState> for u8 {
    fn from(state: BlockState) -> Self {
        match state {
            BlockState::Unallocated => BlockState::MARK_UNALLOCATED,
            BlockState::Unmarked => BlockState::MARK_UNMARKED,
            BlockState::Marked => BlockState::MARK_MARKED,
        }
    }
}