mmtk/util/heap/chunk_map.rs

use crate::scheduler::GCWork;
use crate::util::linear_scan::Region;
use crate::util::linear_scan::RegionIterator;
use crate::util::metadata::side_metadata::SideMetadataSpec;
use crate::util::Address;
use crate::vm::VMBinding;
use spin::Mutex;
use std::ops::Range;

/// Data structure to reference an MMTk 4 MB chunk.
#[repr(transparent)]
#[derive(Debug, Clone, Copy, PartialOrd, PartialEq, Eq)]
pub struct Chunk(Address);

impl Region for Chunk {
    const LOG_BYTES: usize = crate::util::heap::layout::vm_layout::LOG_BYTES_IN_CHUNK;

    fn from_aligned_address(address: Address) -> Self {
        debug_assert!(address.is_aligned_to(Self::BYTES));
        Self(address)
    }

    fn start(&self) -> Address {
        self.0
    }
}

impl Chunk {
    /// Chunk constant with zero address
    // FIXME: We use this as an empty value. What if we actually use the first chunk?
    pub const ZERO: Self = Self(Address::ZERO);

    /// Get an iterator for regions within this chunk.
    pub fn iter_region<R: Region>(&self) -> RegionIterator<R> {
        // R should be smaller than a chunk
        debug_assert!(R::LOG_BYTES < Self::LOG_BYTES);
        // The chunk boundaries should be aligned to R
        debug_assert!(R::is_aligned(self.start()));
        debug_assert!(R::is_aligned(self.end()));

        let start = R::from_aligned_address(self.start());
        let end = R::from_aligned_address(self.end());
        RegionIterator::<R>::new(start, end)
    }
}
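
// A minimal illustrative sketch (not part of the original file) of how `iter_region`
// might be used, assuming a hypothetical `Block` type that implements `Region` with a
// `LOG_BYTES` smaller than `Chunk::LOG_BYTES` (e.g. 15, i.e. 32 KB blocks):
//
//     let chunk = Chunk::from_aligned_address(chunk_start);
//     for block in chunk.iter_region::<Block>() {
//         // With 32 KB blocks in a 4 MB chunk, this visits 128 blocks in address
//         // order, from `chunk.start()` up to (but excluding) `chunk.end()`.
//         process_block(block);
//     }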

/// The allocation state for a chunk in the chunk map. It records whether the chunk is
/// allocated or free, and, if allocated, which space it belongs to.
/// - Highest bit: 0 = free, 1 = allocated
/// - Lower 4 bits: space index (0-15) if the chunk is allocated
#[repr(transparent)]
#[derive(PartialEq, Clone, Copy)]
pub struct ChunkState(u8);

impl ChunkState {
    const ALLOC_BIT_MASK: u8 = 0x80;
    const SPACE_INDEX_MASK: u8 = 0x0F;

    /// Create a new ChunkState that represents being allocated in the given space
    pub fn allocated(space_index: usize) -> ChunkState {
        debug_assert!(space_index < crate::util::heap::layout::heap_parameters::MAX_SPACES);
        let mut encode = space_index as u8;
        encode |= Self::ALLOC_BIT_MASK;
        ChunkState(encode)
    }

    /// Create a new ChunkState that represents being free
    pub fn free() -> ChunkState {
        ChunkState(0u8)
    }

    /// Is the chunk free?
    pub fn is_free(&self) -> bool {
        self.0 == 0
    }

    /// Is the chunk allocated?
    pub fn is_allocated(&self) -> bool {
        !self.is_free()
    }

    /// Get the space index of the chunk
    pub fn get_space_index(&self) -> usize {
        debug_assert!(self.is_allocated());
        let index = (self.0 & Self::SPACE_INDEX_MASK) as usize;
        debug_assert!(index < crate::util::heap::layout::heap_parameters::MAX_SPACES);
        index
    }
}

impl std::fmt::Debug for ChunkState {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        if self.is_free() {
            write!(f, "Free")
        } else {
            write!(f, "Allocated({})", self.get_space_index())
        }
    }
}
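
// A minimal sketch (not part of the original file) that exercises the `ChunkState`
// encoding described above: the high bit marks a chunk as allocated and the low four
// bits hold the owning space's index, so space index 3 encodes to the byte 0x83.
#[cfg(test)]
mod chunk_state_encoding_sketch {
    use super::*;

    #[test]
    fn encode_and_decode() {
        // Allocated in space 3: alloc bit (0x80) | space index (0x03) == 0x83.
        let allocated = ChunkState::allocated(3);
        assert!(allocated.is_allocated());
        assert_eq!(allocated.get_space_index(), 3);
        assert_eq!(allocated.0, 0x83);

        // A free chunk is encoded as all zeroes.
        let free = ChunkState::free();
        assert!(free.is_free());
        assert_eq!(free.0, 0);
    }
}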

/// A byte-map to record all the allocated chunks.
/// A plan can use this to maintain records for the chunks that it uses, and their states.
/// Any plan that uses the chunk map should include the `ALLOC_TABLE` spec in its global side metadata specs.
///
/// A chunk map is created for a space (identified by the space index), and will only update or list chunks for that space.
pub struct ChunkMap {
    /// The space that uses this chunk map.
    space_index: usize,
    /// The range of chunks used by the space. The range only records the lowest and the highest chunk.
    /// All the chunks used by the space are within this range, but not every chunk in the range is
    /// necessarily used by the space: spaces may be discontiguous, so the range may include chunks that
    /// belong to other spaces. We need both the space index in the chunk map and the space index encoded
    /// in the chunk state to know whether a chunk belongs to the current space.
    chunk_range: Mutex<Range<Chunk>>,
}

impl ChunkMap {
    /// Chunk alloc table
    pub const ALLOC_TABLE: SideMetadataSpec =
        crate::util::metadata::side_metadata::spec_defs::CHUNK_MARK;

    /// Create a chunk map for the space with the given space index.
    pub fn new(space_index: usize) -> Self {
        Self {
            space_index,
            chunk_range: Mutex::new(Chunk::ZERO..Chunk::ZERO),
        }
    }

    /// Set a chunk as allocated, or as free.
    pub fn set_allocated(&self, chunk: Chunk, allocated: bool) {
        let state = if allocated {
            ChunkState::allocated(self.space_index)
        } else {
            ChunkState::free()
        };
        // Do nothing if the chunk is already in the expected state.
        if self.get_internal(chunk) == state {
            return;
        }
        #[cfg(debug_assertions)]
        {
            let old_state = self.get_internal(chunk);
            // If a chunk is free, any space may use it. If a chunk is not free, only the current space may update its state.
            assert!(
                old_state.is_free() || old_state.get_space_index() == self.space_index,
                "Chunk {:?}: old state {:?}, new state {:?}. Cannot set to new state.",
                chunk,
                old_state,
                state
            );
        }
        // Update the alloc byte.
        unsafe { Self::ALLOC_TABLE.store::<u8>(chunk.start(), state.0) };
        // If this is a newly allocated chunk, expand the chunk range.
        if allocated {
            debug_assert!(!chunk.start().is_zero());
            let mut range = self.chunk_range.lock();
            if range.start == Chunk::ZERO {
                // FIXME: what if we actually use the first chunk?
                range.start = chunk;
                range.end = chunk.next();
            } else if chunk < range.start {
                range.start = chunk;
            } else if range.end <= chunk {
                range.end = chunk.next();
            }
        }
    }

    /// Get the state of a chunk. Return `None` if the chunk does not belong to this space.
    pub fn get(&self, chunk: Chunk) -> Option<ChunkState> {
        let state = self.get_internal(chunk);
        (state.is_allocated() && state.get_space_index() == self.space_index).then_some(state)
    }

    /// Get the state of a chunk, regardless of the space. This should always be private.
    fn get_internal(&self, chunk: Chunk) -> ChunkState {
        let byte = unsafe { Self::ALLOC_TABLE.load::<u8>(chunk.start()) };
        ChunkState(byte)
    }

    /// An iterator over all the chunks in the heap allocated by this space.
    pub fn all_chunks(&self) -> impl Iterator<Item = Chunk> + '_ {
        let chunk_range = self.chunk_range.lock();
        RegionIterator::<Chunk>::new(chunk_range.start, chunk_range.end)
            .filter(|c| self.get(*c).is_some())
    }

    /// Helper function to create a per-chunk processing work packet for each allocated chunk.
    pub fn generate_tasks<VM: VMBinding>(
        &self,
        func: impl Fn(Chunk) -> Box<dyn GCWork<VM>>,
    ) -> Vec<Box<dyn GCWork<VM>>> {
        let mut work_packets: Vec<Box<dyn GCWork<VM>>> = vec![];
        for chunk in self.all_chunks() {
            work_packets.push(func(chunk));
        }
        work_packets
    }
}
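
// A minimal sketch (not part of the original file) of how a space might drive the chunk
// map. `space_index`, `chunk`, the `VM` binding type and the `ReleaseChunk` work packet
// are hypothetical, and the side metadata backing `ALLOC_TABLE` is assumed to be mapped:
//
//     let chunk_map = ChunkMap::new(space_index);
//
//     // Record that this space now owns `chunk`; this also grows the tracked chunk range.
//     chunk_map.set_allocated(chunk, true);
//     assert!(chunk_map.get(chunk).is_some());
//
//     // Create one work packet per chunk currently allocated to this space.
//     let packets: Vec<Box<dyn GCWork<VM>>> =
//         chunk_map.generate_tasks(|c| Box::new(ReleaseChunk { chunk: c }));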