mmtk/policy/immix/block.rs

use super::defrag::Histogram;
use super::line::Line;
use super::ImmixSpace;
use crate::util::constants::*;
use crate::util::heap::blockpageresource::BlockPool;
use crate::util::heap::chunk_map::Chunk;
use crate::util::linear_scan::{Region, RegionIterator};
use crate::util::metadata::side_metadata::{MetadataByteArrayRef, SideMetadataSpec};
#[cfg(feature = "vo_bit")]
use crate::util::metadata::vo_bit;
#[cfg(feature = "object_pinning")]
use crate::util::metadata::MetadataSpec;
use crate::util::object_enum::BlockMayHaveObjects;
use crate::util::Address;
use crate::vm::*;
use std::sync::atomic::Ordering;

/// The block allocation state.
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum BlockState {
    /// The block is not allocated.
    Unallocated,
    /// The block is allocated but not marked.
    Unmarked,
    /// The block is allocated and marked.
    Marked,
    /// The block is marked as reusable.
    Reusable { unavailable_lines: u8 },
}

impl BlockState {
    /// Private constant
    const MARK_UNALLOCATED: u8 = 0;
    /// Private constant
    const MARK_UNMARKED: u8 = u8::MAX;
    /// Private constant
    const MARK_MARKED: u8 = u8::MAX - 1;
}

impl From<u8> for BlockState {
    fn from(state: u8) -> Self {
        match state {
            Self::MARK_UNALLOCATED => BlockState::Unallocated,
            Self::MARK_UNMARKED => BlockState::Unmarked,
            Self::MARK_MARKED => BlockState::Marked,
            unavailable_lines => BlockState::Reusable { unavailable_lines },
        }
    }
}

impl From<BlockState> for u8 {
    fn from(state: BlockState) -> Self {
        match state {
            BlockState::Unallocated => BlockState::MARK_UNALLOCATED,
            BlockState::Unmarked => BlockState::MARK_UNMARKED,
            BlockState::Marked => BlockState::MARK_MARKED,
            BlockState::Reusable { unavailable_lines } => unavailable_lines,
        }
    }
}

impl BlockState {
    /// Test if the block is reusable.
    pub const fn is_reusable(&self) -> bool {
        matches!(self, BlockState::Reusable { .. })
    }
}
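
// Illustrative sketch (not part of the upstream code): a small test module that
// exercises the mark-byte encoding used by the `From` conversions above. The
// module and test names are hypothetical; the test only relies on the
// conversions defined in this file: 0 = unallocated, u8::MAX = unmarked,
// u8::MAX - 1 = marked, and any other value is a reusable block's
// `unavailable_lines` count.
#[cfg(test)]
mod block_state_encoding_sketch {
    use super::BlockState;

    #[test]
    fn mark_byte_round_trip() {
        assert_eq!(BlockState::from(0u8), BlockState::Unallocated);
        assert_eq!(BlockState::from(u8::MAX), BlockState::Unmarked);
        assert_eq!(BlockState::from(u8::MAX - 1), BlockState::Marked);
        // Any other byte value encodes a reusable block.
        assert_eq!(
            BlockState::from(7u8),
            BlockState::Reusable {
                unavailable_lines: 7
            }
        );
        assert_eq!(
            u8::from(BlockState::Reusable {
                unavailable_lines: 7
            }),
            7u8
        );
    }
}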

/// Data structure to reference an immix block.
#[repr(transparent)]
#[derive(Debug, Clone, Copy, PartialOrd, PartialEq)]
pub struct Block(Address);

impl Region for Block {
    #[cfg(not(feature = "immix_smaller_block"))]
    const LOG_BYTES: usize = 15;
    #[cfg(feature = "immix_smaller_block")]
    const LOG_BYTES: usize = 13;

    fn from_aligned_address(address: Address) -> Self {
        debug_assert!(address.is_aligned_to(Self::BYTES));
        Self(address)
    }

    fn start(&self) -> Address {
        self.0
    }
}

impl BlockMayHaveObjects for Block {
    fn may_have_objects(&self) -> bool {
        self.get_state() != BlockState::Unallocated
    }
}

impl Block {
    /// Log pages in block
    pub const LOG_PAGES: usize = Self::LOG_BYTES - LOG_BYTES_IN_PAGE as usize;
    /// Pages in block
    pub const PAGES: usize = 1 << Self::LOG_PAGES;
    /// Log lines in block
    pub const LOG_LINES: usize = Self::LOG_BYTES - Line::LOG_BYTES;
    /// Lines in block
    pub const LINES: usize = 1 << Self::LOG_LINES;
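
    // Note (illustrative, not from the original source): with the default
    // 32 KiB blocks (LOG_BYTES = 15), and assuming the usual 256-byte lines and
    // 4 KiB pages, these constants work out to PAGES = 8 and LINES = 128; the
    // "immix_smaller_block" feature (8 KiB blocks) gives PAGES = 2 and
    // LINES = 32. The actual values are always derived from Line::LOG_BYTES and
    // LOG_BYTES_IN_PAGE above.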

    /// Block defrag state table (side)
    pub const DEFRAG_STATE_TABLE: SideMetadataSpec =
        crate::util::metadata::side_metadata::spec_defs::IX_BLOCK_DEFRAG;

    /// Block mark table (side)
    pub const MARK_TABLE: SideMetadataSpec =
        crate::util::metadata::side_metadata::spec_defs::IX_BLOCK_MARK;

    /// Get the chunk containing the block.
    pub fn chunk(&self) -> Chunk {
        Chunk::from_unaligned_address(self.0)
    }

    /// Get a byte-array view of the block's line mark table.
    #[allow(clippy::assertions_on_constants)]
    pub fn line_mark_table(&self) -> MetadataByteArrayRef<{ Block::LINES }> {
        debug_assert!(!super::BLOCK_ONLY);
        MetadataByteArrayRef::<{ Block::LINES }>::new(&Line::MARK_TABLE, self.start(), Self::BYTES)
    }

    /// Get block mark state.
    pub fn get_state(&self) -> BlockState {
        let byte = Self::MARK_TABLE.load_atomic::<u8>(self.start(), Ordering::SeqCst);
        byte.into()
    }

    /// Set block mark state.
    pub fn set_state(&self, state: BlockState) {
        let state = u8::from(state);
        Self::MARK_TABLE.store_atomic::<u8>(self.start(), state, Ordering::SeqCst);
    }

    // Defrag byte

    const DEFRAG_SOURCE_STATE: u8 = u8::MAX;

    /// Test if the block is marked for defragmentation.
    pub fn is_defrag_source(&self) -> bool {
        let byte = Self::DEFRAG_STATE_TABLE.load_atomic::<u8>(self.start(), Ordering::SeqCst);
        // The byte should be 0 (not defrag source) or 255 (defrag source) if this is a major defrag GC, as we set the values in PrepareBlockState.
        // But it could be any value in a nursery GC.
        byte == Self::DEFRAG_SOURCE_STATE
    }

    /// Mark the block for defragmentation.
    pub fn set_as_defrag_source(&self, defrag: bool) {
        let byte = if defrag { Self::DEFRAG_SOURCE_STATE } else { 0 };
        Self::DEFRAG_STATE_TABLE.store_atomic::<u8>(self.start(), byte, Ordering::SeqCst);
    }

    /// Record the number of holes in the block.
    pub fn set_holes(&self, holes: usize) {
        Self::DEFRAG_STATE_TABLE.store_atomic::<u8>(self.start(), holes as u8, Ordering::SeqCst);
    }

    /// Get the number of holes.
    pub fn get_holes(&self) -> usize {
        let byte = Self::DEFRAG_STATE_TABLE.load_atomic::<u8>(self.start(), Ordering::SeqCst);
        debug_assert_ne!(byte, Self::DEFRAG_SOURCE_STATE);
        byte as usize
    }

    /// Initialize a clean block after it has been acquired from the page resource.
    pub fn init(&self, copy: bool) {
        self.set_state(if copy {
            BlockState::Marked
        } else {
            BlockState::Unmarked
        });
        Self::DEFRAG_STATE_TABLE.store_atomic::<u8>(self.start(), 0, Ordering::SeqCst);
    }

    /// Deinitialize a block before releasing it.
    pub fn deinit(&self) {
        self.set_state(BlockState::Unallocated);
    }

    /// Get the first line in the block.
    pub fn start_line(&self) -> Line {
        Line::from_aligned_address(self.start())
    }

    /// Get the line just past the end of the block (exclusive end).
    pub fn end_line(&self) -> Line {
        Line::from_aligned_address(self.end())
    }

    /// Get the range of lines within the block.
    #[allow(clippy::assertions_on_constants)]
    pub fn lines(&self) -> RegionIterator<Line> {
        debug_assert!(!super::BLOCK_ONLY);
        RegionIterator::<Line>::new(self.start_line(), self.end_line())
    }

    /// Sweep this block.
    /// Return `true` if the block is released (swept and returned to the space).
    pub fn sweep<VM: VMBinding>(
        &self,
        space: &ImmixSpace<VM>,
        mark_histogram: &mut Histogram,
        line_mark_state: Option<u8>,
    ) -> bool {
        if super::BLOCK_ONLY {
            match self.get_state() {
                BlockState::Unallocated => false,
                BlockState::Unmarked => {
                    #[cfg(feature = "vo_bit")]
                    vo_bit::helper::on_region_swept::<VM, _>(self, false);

                    // If the pin bit is not on the side, we cannot bulk zero.
                    // We shouldn't need to clear it here in that case, since the pin bit
                    // should be overwritten at each object allocation. The same applies below
                    // when we are sweeping at line granularity.
                    #[cfg(feature = "object_pinning")]
                    if let MetadataSpec::OnSide(side) = *VM::VMObjectModel::LOCAL_PINNING_BIT_SPEC {
                        side.bzero_metadata(self.start(), Block::BYTES);
                    }

                    // Release the block if it is allocated but not marked by the current GC.
                    space.release_block(*self);
                    true
                }
                BlockState::Marked => {
                    #[cfg(feature = "vo_bit")]
                    vo_bit::helper::on_region_swept::<VM, _>(self, true);

                    // The block is live.
                    false
                }
                _ => unreachable!(),
            }
        } else {
            // Calculate the number of marked lines and holes.
            let mut marked_lines = 0;
            let mut holes = 0;
            let mut prev_line_is_marked = true;
            let line_mark_state = line_mark_state.unwrap();

            for line in self.lines() {
                if line.is_marked(line_mark_state) {
                    marked_lines += 1;
                    prev_line_is_marked = true;
                } else {
                    if prev_line_is_marked {
                        holes += 1;
                    }
                    // We need to clear the line mark state at least twice in every 128 GCs;
                    // otherwise, the line mark state from the last GC will stick around.
                    if line_mark_state > Line::MAX_MARK_STATE - 2 {
                        line.mark(0);
                    }
                    #[cfg(feature = "immix_zero_on_release")]
                    crate::util::memory::zero(line.start(), Line::BYTES);

                    // We need to clear the pin bit if it is on the side, as this line can be reused.
                    #[cfg(feature = "object_pinning")]
                    if let MetadataSpec::OnSide(side) = *VM::VMObjectModel::LOCAL_PINNING_BIT_SPEC {
                        side.bzero_metadata(line.start(), Line::BYTES);
                    }

                    prev_line_is_marked = false;
                }
            }

            if marked_lines == 0 {
                #[cfg(feature = "vo_bit")]
                vo_bit::helper::on_region_swept::<VM, _>(self, false);

                // Release the block if none of its lines are marked.
                space.release_block(*self);
                true
            } else {
                // There are some marked lines. Keep the block live.
                if marked_lines != Block::LINES {
                    // There are holes. Mark the block as reusable.
                    self.set_state(BlockState::Reusable {
                        unavailable_lines: marked_lines as _,
                    });
                    space.reusable_blocks.push(*self)
                } else {
                    // Clear mark state.
                    self.set_state(BlockState::Unmarked);
                }
                // Update mark_histogram.
                mark_histogram[holes] += marked_lines;
                // Record the number of holes in block side metadata.
                self.set_holes(holes);

                #[cfg(feature = "vo_bit")]
                vo_bit::helper::on_region_swept::<VM, _>(self, true);

                false
            }
        }
    }

    /// Clear VO bits metadata for unmarked regions.
    /// This is useful for clearing VO bits during a nursery GC for StickyImmix,
    /// when young objects (allocated in unmarked regions) may die, while old
    /// objects (in marked regions) are always considered live.
    #[cfg(feature = "vo_bit")]
    pub fn clear_vo_bits_for_unmarked_regions(&self, line_mark_state: Option<u8>) {
        match line_mark_state {
            None => {
                match self.get_state() {
                    BlockState::Unmarked => {
                        // It may contain young objects.  Clear it.
                        vo_bit::bzero_vo_bit(self.start(), Self::BYTES);
                    }
                    BlockState::Marked => {
                        // It contains old objects.  Skip it.
                    }
                    _ => unreachable!(),
                }
            }
            Some(state) => {
                // With lines.
                for line in self.lines() {
                    if !line.is_marked(state) {
                        // It may contain young objects.  Clear it.
                        vo_bit::bzero_vo_bit(line.start(), Line::BYTES);
                    }
                }
            }
        }
    }
}

/// A pool for storing reusable blocks, backed by a `BlockPool`.
pub struct ReusableBlockPool {
    queue: BlockPool<Block>,
    num_workers: usize,
}

impl ReusableBlockPool {
    /// Create an empty block pool.
    pub fn new(num_workers: usize) -> Self {
        Self {
            queue: BlockPool::new(num_workers),
            num_workers,
        }
    }

    /// Get the number of blocks in the pool.
    pub fn len(&self) -> usize {
        self.queue.len()
    }

    /// Add a block to the pool.
    pub fn push(&self, block: Block) {
        self.queue.push(block)
    }

    /// Pop a block from the pool.
    pub fn pop(&self) -> Option<Block> {
        self.queue.pop()
    }

    /// Clear the pool.
    pub fn reset(&mut self) {
        self.queue = BlockPool::new(self.num_workers);
    }

    /// Iterate over all blocks in the pool, calling the visitor for each block.
    pub fn iterate_blocks(&self, mut f: impl FnMut(Block)) {
        self.queue.iterate_blocks(&mut f);
    }

    /// Flush the block queue.
    pub fn flush_all(&self) {
        self.queue.flush_all();
    }
}
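
// Illustrative note (an assumption, not from the upstream source): based only on
// this file, the typical lifecycle of this pool is that `Block::sweep` pushes
// partially-marked blocks via `space.reusable_blocks.push(*self)`, `flush_all`
// later publishes any blocks still buffered inside the underlying `BlockPool`,
// and allocators then `pop` blocks to reuse their free lines. The exact call
// sites of `flush_all` and `pop` live outside this file.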