mmtk/policy/immix/block.rs

use super::defrag::Histogram;
use super::line::Line;
use super::ImmixSpace;
use crate::util::constants::*;
use crate::util::heap::blockpageresource::BlockPool;
use crate::util::heap::chunk_map::Chunk;
use crate::util::linear_scan::{Region, RegionIterator};
use crate::util::metadata::side_metadata::{MetadataByteArrayRef, SideMetadataSpec};
#[cfg(feature = "vo_bit")]
use crate::util::metadata::vo_bit;
#[cfg(feature = "object_pinning")]
use crate::util::metadata::MetadataSpec;
use crate::util::object_enum::BlockMayHaveObjects;
use crate::util::Address;
use crate::vm::*;
use std::sync::atomic::Ordering;

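/// The allocation/mark state of a block, stored as a single byte of side metadata per block.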
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum BlockState {
    /// The block is not allocated.
    Unallocated,
    /// The block is allocated but not yet marked in this GC.
    Unmarked,
    /// The block is allocated and marked.
    Marked,
    /// The block is reusable; `unavailable_lines` lines are still marked (live).
    Reusable { unavailable_lines: u8 },
}

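// Raw byte values used to encode `BlockState` in the block mark table. Any other
// non-zero byte is interpreted as `Reusable` with that many unavailable lines.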
impl BlockState {
    const MARK_UNALLOCATED: u8 = 0;
    const MARK_UNMARKED: u8 = u8::MAX;
    const MARK_MARKED: u8 = u8::MAX - 1;
}

impl From<u8> for BlockState {
    fn from(state: u8) -> Self {
        match state {
            Self::MARK_UNALLOCATED => BlockState::Unallocated,
            Self::MARK_UNMARKED => BlockState::Unmarked,
            Self::MARK_MARKED => BlockState::Marked,
            unavailable_lines => BlockState::Reusable { unavailable_lines },
        }
    }
}

impl From<BlockState> for u8 {
    fn from(state: BlockState) -> Self {
        match state {
            BlockState::Unallocated => BlockState::MARK_UNALLOCATED,
            BlockState::Unmarked => BlockState::MARK_UNMARKED,
            BlockState::Marked => BlockState::MARK_MARKED,
            BlockState::Reusable { unavailable_lines } => unavailable_lines,
        }
    }
}

impl BlockState {
    pub const fn is_reusable(&self) -> bool {
        matches!(self, BlockState::Reusable { .. })
    }
}

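/// A block of the Immix space: the coarse-grained allocation unit, made up of lines.
/// A block wraps its start address and is 32 KiB (2^15 bytes) by default, or 8 KiB
/// with the `immix_smaller_block` feature.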
#[repr(transparent)]
#[derive(Debug, Clone, Copy, PartialOrd, PartialEq)]
pub struct Block(Address);

impl Region for Block {
    #[cfg(not(feature = "immix_smaller_block"))]
    const LOG_BYTES: usize = 15;
    #[cfg(feature = "immix_smaller_block")]
    const LOG_BYTES: usize = 13;

    fn from_aligned_address(address: Address) -> Self {
        debug_assert!(address.is_aligned_to(Self::BYTES));
        Self(address)
    }

    fn start(&self) -> Address {
        self.0
    }
}

impl BlockMayHaveObjects for Block {
    fn may_have_objects(&self) -> bool {
        self.get_state() != BlockState::Unallocated
    }
}

impl Block {
    /// Log of the number of pages in a block.
    pub const LOG_PAGES: usize = Self::LOG_BYTES - LOG_BYTES_IN_PAGE as usize;
    /// Number of pages in a block.
    pub const PAGES: usize = 1 << Self::LOG_PAGES;
    /// Log of the number of lines in a block.
    pub const LOG_LINES: usize = Self::LOG_BYTES - Line::LOG_BYTES;
    /// Number of lines in a block.
    pub const LINES: usize = 1 << Self::LOG_LINES;

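    /// Block defragmentation state side-metadata table (one byte per block).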
    pub const DEFRAG_STATE_TABLE: SideMetadataSpec =
        crate::util::metadata::side_metadata::spec_defs::IX_BLOCK_DEFRAG;

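    /// Block mark (state) side-metadata table (one byte per block).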
    pub const MARK_TABLE: SideMetadataSpec =
        crate::util::metadata::side_metadata::spec_defs::IX_BLOCK_MARK;

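    /// Get the chunk containing this block.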
    pub fn chunk(&self) -> Chunk {
        Chunk::from_unaligned_address(self.0)
    }

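    /// Get a byte-array view of the line mark table covering this block.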
    #[allow(clippy::assertions_on_constants)]
    pub fn line_mark_table(&self) -> MetadataByteArrayRef<{ Block::LINES }> {
        debug_assert!(!super::BLOCK_ONLY);
        MetadataByteArrayRef::<{ Block::LINES }>::new(&Line::MARK_TABLE, self.start(), Self::BYTES)
    }

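    /// Load the block state from the mark table.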
    pub fn get_state(&self) -> BlockState {
        let byte = Self::MARK_TABLE.load_atomic::<u8>(self.start(), Ordering::SeqCst);
        byte.into()
    }

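    /// Store the block state into the mark table.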
    pub fn set_state(&self, state: BlockState) {
        let state = u8::from(state);
        Self::MARK_TABLE.store_atomic::<u8>(self.start(), state, Ordering::SeqCst);
    }

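    /// Byte value in the defrag state table marking the block as a defrag source.
    /// Any smaller value in that table records the block's hole count instead.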
    const DEFRAG_SOURCE_STATE: u8 = u8::MAX;

    /// Test if the block is a defrag source.
    pub fn is_defrag_source(&self) -> bool {
        let byte = Self::DEFRAG_STATE_TABLE.load_atomic::<u8>(self.start(), Ordering::SeqCst);
        byte == Self::DEFRAG_SOURCE_STATE
    }

    /// Mark or unmark the block as a defrag source.
    pub fn set_as_defrag_source(&self, defrag: bool) {
        let byte = if defrag { Self::DEFRAG_SOURCE_STATE } else { 0 };
        Self::DEFRAG_STATE_TABLE.store_atomic::<u8>(self.start(), byte, Ordering::SeqCst);
    }

    /// Record the number of holes in the block.
    pub fn set_holes(&self, holes: usize) {
        Self::DEFRAG_STATE_TABLE.store_atomic::<u8>(self.start(), holes as u8, Ordering::SeqCst);
    }

    /// Get the number of holes recorded for the block.
    pub fn get_holes(&self) -> usize {
        let byte = Self::DEFRAG_STATE_TABLE.load_atomic::<u8>(self.start(), Ordering::SeqCst);
        debug_assert_ne!(byte, Self::DEFRAG_SOURCE_STATE);
        byte as usize
    }

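    /// Initialise the block: set its state to `Marked` if `copy` is set, `Unmarked`
    /// otherwise, and clear its defrag state.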
    pub fn init(&self, copy: bool) {
        self.set_state(if copy {
            BlockState::Marked
        } else {
            BlockState::Unmarked
        });
        Self::DEFRAG_STATE_TABLE.store_atomic::<u8>(self.start(), 0, Ordering::SeqCst);
    }

    pub fn deinit(&self) {
        self.set_state(BlockState::Unallocated);
    }

    pub fn start_line(&self) -> Line {
        Line::from_aligned_address(self.start())
    }

    pub fn end_line(&self) -> Line {
        Line::from_aligned_address(self.end())
    }

    #[allow(clippy::assertions_on_constants)]
    pub fn lines(&self) -> RegionIterator<Line> {
        debug_assert!(!super::BLOCK_ONLY);
        RegionIterator::<Line>::new(self.start_line(), self.end_line())
    }

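    /// Sweep the block: count marked lines and holes, release the block if nothing is
    /// marked, or flag it as reusable when it still has unmarked lines.
    /// Returns `true` if the block was released back to the space.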
    pub fn sweep<VM: VMBinding>(
        &self,
        space: &ImmixSpace<VM>,
        mark_histogram: &mut Histogram,
        line_mark_state: Option<u8>,
    ) -> bool {
        if super::BLOCK_ONLY {
            match self.get_state() {
                BlockState::Unallocated => false,
                BlockState::Unmarked => {
                    // The block was not marked in this GC cycle: release the whole block.
                    #[cfg(feature = "vo_bit")]
                    vo_bit::helper::on_region_swept::<VM, _>(self, false);

                    #[cfg(feature = "object_pinning")]
                    if let MetadataSpec::OnSide(side) = *VM::VMObjectModel::LOCAL_PINNING_BIT_SPEC {
                        side.bzero_metadata(self.start(), Block::BYTES);
                    }

                    space.release_block(*self);
                    true
                }
                BlockState::Marked => {
                    // The block is live.
                    #[cfg(feature = "vo_bit")]
                    vo_bit::helper::on_region_swept::<VM, _>(self, true);

                    false
                }
                _ => unreachable!(),
            }
        } else {
            // Count marked lines and holes (runs of consecutive unmarked lines).
            let mut marked_lines = 0;
            let mut holes = 0;
            let mut prev_line_is_marked = true;
            let line_mark_state = line_mark_state.unwrap();

            for line in self.lines() {
                if line.is_marked(line_mark_state) {
                    marked_lines += 1;
                    prev_line_is_marked = true;
                } else {
                    if prev_line_is_marked {
                        holes += 1;
                    }
                    if line_mark_state > Line::MAX_MARK_STATE - 2 {
                        line.mark(0);
                    }
                    #[cfg(feature = "immix_zero_on_release")]
                    crate::util::memory::zero(line.start(), Line::BYTES);

                    #[cfg(feature = "object_pinning")]
                    if let MetadataSpec::OnSide(side) = *VM::VMObjectModel::LOCAL_PINNING_BIT_SPEC {
                        side.bzero_metadata(line.start(), Line::BYTES);
                    }

                    prev_line_is_marked = false;
                }
            }

            if marked_lines == 0 {
                // No line is marked: release the whole block.
                #[cfg(feature = "vo_bit")]
                vo_bit::helper::on_region_swept::<VM, _>(self, false);

                space.release_block(*self);
                true
            } else {
                // Some lines are marked: the block stays live.
                if marked_lines != Block::LINES {
                    // The block has unmarked lines: register it as reusable.
                    self.set_state(BlockState::Reusable {
                        unavailable_lines: marked_lines as _,
                    });
                    space.reusable_blocks.push(*self)
                } else {
                    // All lines are marked.
                    self.set_state(BlockState::Unmarked);
                }
                // Update the defrag mark histogram and record the hole count.
                mark_histogram[holes] += marked_lines;
                self.set_holes(holes);

                #[cfg(feature = "vo_bit")]
                vo_bit::helper::on_region_swept::<VM, _>(self, true);

                false
            }
        }
    }

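    /// Clear valid-object (VO) bits for regions that were not marked in this GC:
    /// per line when a line mark state is given, otherwise for the whole block.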
    #[cfg(feature = "vo_bit")]
    pub fn clear_vo_bits_for_unmarked_regions(&self, line_mark_state: Option<u8>) {
        match line_mark_state {
            None => {
                match self.get_state() {
                    BlockState::Unmarked => {
                        // The block is unmarked: clear all VO bits in the block.
                        vo_bit::bzero_vo_bit(self.start(), Self::BYTES);
                    }
                    BlockState::Marked => {
                        // The block is live; keep its VO bits.
                    }
                    _ => unreachable!(),
                }
            }
            Some(state) => {
                for line in self.lines() {
                    if !line.is_marked(state) {
                        vo_bit::bzero_vo_bit(line.start(), Line::BYTES);
                    }
                }
            }
        }
    }
}

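/// A pool of reusable blocks, backed by a `BlockPool` sized for the number of GC workers.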
pub struct ReusableBlockPool {
    queue: BlockPool<Block>,
    num_workers: usize,
}

impl ReusableBlockPool {
    pub fn new(num_workers: usize) -> Self {
        Self {
            queue: BlockPool::new(num_workers),
            num_workers,
        }
    }

    pub fn len(&self) -> usize {
        self.queue.len()
    }

    pub fn push(&self, block: Block) {
        self.queue.push(block)
    }

    pub fn pop(&self) -> Option<Block> {
        self.queue.pop()
    }

    pub fn reset(&mut self) {
        self.queue = BlockPool::new(self.num_workers);
    }

    pub fn iterate_blocks(&self, mut f: impl FnMut(Block)) {
        self.queue.iterate_blocks(&mut f);
    }

    pub fn flush_all(&self) {
        self.queue.flush_all();
    }
}