use std::ops::Range;

use super::sft::SFT;
use super::space::{CommonSpace, Space};
use crate::plan::VectorObjectQueue;
use crate::policy::gc_work::{TraceKind, TRACE_KIND_TRANSITIVE_PIN};
use crate::policy::sft::GCWorkerMutRef;
use crate::scheduler::GCWorker;
use crate::util::alloc::allocator::align_allocation_no_fill;
use crate::util::constants::LOG_BYTES_IN_WORD;
use crate::util::copy::CopySemantics;
use crate::util::heap::{MonotonePageResource, PageResource};
use crate::util::metadata::{extract_side_metadata, vo_bit};
use crate::util::object_enum::{self, ObjectEnumerator};
use crate::util::{Address, ObjectReference};
use crate::{vm::*, ObjectQueue};
use atomic::Ordering;

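// Mark-compact performs two transitive closures over the heap: a marking pass that
// identifies live objects, and a forwarding pass that updates references to the
// addresses computed when forwarding pointers are calculated.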
pub(crate) const TRACE_KIND_MARK: TraceKind = 0;
pub(crate) const TRACE_KIND_FORWARD: TraceKind = 1;

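/// A mark-compact space. Compaction is done Lisp-2 style: every object reserves an extra
/// header word in which the collector records the object's forwarding pointer.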
pub struct MarkCompactSpace<VM: VMBinding> {
    common: CommonSpace<VM>,
    pr: MonotonePageResource<VM>,
}

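/// Mask for the mark bit within the per-object mark byte.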
const GC_MARK_BIT_MASK: u8 = 1;

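/// Each object reserves one extra word ahead of the object start, used to store its
/// forwarding pointer during compaction.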
pub const GC_EXTRA_HEADER_WORD: usize = 1;
const GC_EXTRA_HEADER_BYTES: usize = GC_EXTRA_HEADER_WORD << LOG_BYTES_IN_WORD;

impl<VM: VMBinding> SFT for MarkCompactSpace<VM> {
    fn name(&self) -> &'static str {
        self.get_name()
    }

    fn get_forwarded_object(&self, object: ObjectReference) -> Option<ObjectReference> {
        Self::get_header_forwarding_pointer(object)
    }

    fn is_live(&self, object: ObjectReference) -> bool {
        Self::is_marked(object)
    }

    #[cfg(feature = "object_pinning")]
    fn pin_object(&self, _object: ObjectReference) -> bool {
        panic!("Cannot pin/unpin objects of MarkCompactSpace.")
    }

    #[cfg(feature = "object_pinning")]
    fn unpin_object(&self, _object: ObjectReference) -> bool {
        panic!("Cannot pin/unpin objects of MarkCompactSpace.")
    }

    #[cfg(feature = "object_pinning")]
    fn is_object_pinned(&self, _object: ObjectReference) -> bool {
        false
    }

    fn is_movable(&self) -> bool {
        true
    }

    fn initialize_object_metadata(&self, object: ObjectReference) {
        crate::util::metadata::vo_bit::set_vo_bit(object);
    }

    #[cfg(feature = "sanity")]
    fn is_sane(&self) -> bool {
        true
    }

    #[cfg(feature = "is_mmtk_object")]
    fn is_mmtk_object(&self, addr: Address) -> Option<ObjectReference> {
        crate::util::metadata::vo_bit::is_vo_bit_set_for_addr(addr)
    }

    #[cfg(feature = "is_mmtk_object")]
    fn find_object_from_internal_pointer(
        &self,
        ptr: Address,
        max_search_bytes: usize,
    ) -> Option<ObjectReference> {
        crate::util::metadata::vo_bit::find_object_from_internal_pointer::<VM>(
            ptr,
            max_search_bytes,
        )
    }

    fn sft_trace_object(
        &self,
        _queue: &mut VectorObjectQueue,
        _object: ObjectReference,
        _worker: GCWorkerMutRef,
    ) -> ObjectReference {
        // Mark-compact needs two different traces (mark and forward), so tracing goes through
        // `PolicyTraceObject::trace_object` with an explicit trace kind instead of the SFT.
        panic!("sft_trace_object() cannot be used with mark compact space")
    }

    fn debug_print_object_info(&self, object: ObjectReference) {
        println!("marked = {}", MarkCompactSpace::<VM>::is_marked(object));
        println!(
            "header forwarding pointer = {:?}",
            MarkCompactSpace::<VM>::get_header_forwarding_pointer(object)
        );
        self.common.debug_print_object_global_info(object);
    }
}

impl<VM: VMBinding> Space<VM> for MarkCompactSpace<VM> {
    fn as_space(&self) -> &dyn Space<VM> {
        self
    }

    fn as_sft(&self) -> &(dyn SFT + Sync + 'static) {
        self
    }

    fn get_page_resource(&self) -> &dyn PageResource<VM> {
        &self.pr
    }

    fn maybe_get_page_resource_mut(&mut self) -> Option<&mut dyn PageResource<VM>> {
        Some(&mut self.pr)
    }

    fn common(&self) -> &CommonSpace<VM> {
        &self.common
    }

    fn initialize_sft(&self, sft_map: &mut dyn crate::policy::sft_map::SFTMap) {
        self.common().initialize_sft(self.as_sft(), sft_map)
    }

    fn release_multiple_pages(&mut self, _start: Address) {
        panic!("MarkCompactSpace only releases pages en masse")
    }

    fn enumerate_objects(&self, enumerator: &mut dyn ObjectEnumerator) {
        object_enum::enumerate_blocks_from_monotonic_page_resource(enumerator, &self.pr);
    }

    fn clear_side_log_bits(&self) {
        unimplemented!()
    }

    fn set_side_log_bits(&self) {
        unimplemented!()
    }
}

impl<VM: VMBinding> crate::policy::gc_work::PolicyTraceObject<VM> for MarkCompactSpace<VM> {
    fn trace_object<Q: ObjectQueue, const KIND: crate::policy::gc_work::TraceKind>(
        &self,
        queue: &mut Q,
        object: ObjectReference,
        _copy: Option<CopySemantics>,
        _worker: &mut GCWorker<VM>,
    ) -> ObjectReference {
        debug_assert!(
            KIND != TRACE_KIND_TRANSITIVE_PIN,
            "MarkCompact does not support transitive pin trace."
        );
        if KIND == TRACE_KIND_MARK {
            self.trace_mark_object(queue, object)
        } else if KIND == TRACE_KIND_FORWARD {
            self.trace_forward_object(queue, object)
        } else {
            unreachable!()
        }
    }

    fn may_move_objects<const KIND: crate::policy::gc_work::TraceKind>() -> bool {
        if KIND == TRACE_KIND_MARK {
            false
        } else if KIND == TRACE_KIND_FORWARD {
            true
        } else {
            unreachable!()
        }
    }
}

impl<VM: VMBinding> MarkCompactSpace<VM> {
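    /// Bytes reserved in front of each object for the forwarding-pointer word. This is the
    /// larger of the VM's maximum alignment and the extra header size, rounded up to a power
    /// of two.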
    pub const HEADER_RESERVED_IN_BYTES: usize = if VM::MAX_ALIGNMENT > GC_EXTRA_HEADER_BYTES {
        VM::MAX_ALIGNMENT
    } else {
        GC_EXTRA_HEADER_BYTES
    }
    .next_power_of_two();

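    /// Get the address of the header forwarding-pointer slot: the extra header word just
    /// before the object start.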
    fn header_forwarding_pointer_address(object: ObjectReference) -> Address {
        object.to_object_start::<VM>() - GC_EXTRA_HEADER_BYTES
    }

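    /// Read the header forwarding pointer, returning `None` if the slot is zero (i.e. no
    /// forwarding pointer has been stored).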
    fn get_header_forwarding_pointer(object: ObjectReference) -> Option<ObjectReference> {
        let addr = unsafe { Self::header_forwarding_pointer_address(object).load::<Address>() };
        ObjectReference::from_raw_address(addr)
    }

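    /// Store the forwarding pointer into the object's header slot.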
    fn store_header_forwarding_pointer(
        object: ObjectReference,
        forwarding_pointer: ObjectReference,
    ) {
        unsafe {
            Self::header_forwarding_pointer_address(object)
                .store::<ObjectReference>(forwarding_pointer);
        }
    }

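    /// Clear (zero) the header forwarding-pointer slot for `object`.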
    fn clear_header_forwarding_pointer(object: ObjectReference) {
        crate::util::memory::zero(
            Self::header_forwarding_pointer_address(object),
            GC_EXTRA_HEADER_BYTES,
        );
    }

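    /// Create a new `MarkCompactSpace` backed by a monotone (bump-cursor) page resource,
    /// contiguous or discontiguous depending on the VM request.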
    pub fn new(args: crate::policy::space::PlanCreateSpaceArgs<VM>) -> Self {
        let vm_map = args.vm_map;
        let is_discontiguous = args.vmrequest.is_discontiguous();
        let local_specs = extract_side_metadata(&[*VM::VMObjectModel::LOCAL_MARK_BIT_SPEC]);
        let common = CommonSpace::new(args.into_policy_args(true, false, local_specs));
        MarkCompactSpace {
            pr: if is_discontiguous {
                MonotonePageResource::new_discontiguous(vm_map)
            } else {
                MonotonePageResource::new_contiguous(common.start, common.extent, vm_map)
            },
            common,
        }
    }

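    // Per-GC hooks: the space keeps no per-collection state of its own, so prepare and release
    // are currently no-ops; the real work happens in the mark, forward, and compact passes.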
    pub fn prepare(&self) {}

    pub fn release(&self) {}

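    /// Mark-pass trace: mark `object` and enqueue it for scanning if this is the first time it
    /// has been reached. Objects are not moved during this pass.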
    pub fn trace_mark_object<Q: ObjectQueue>(
        &self,
        queue: &mut Q,
        object: ObjectReference,
    ) -> ObjectReference {
        debug_assert!(
            crate::util::metadata::vo_bit::is_vo_bit_set(object),
            "{:x}: VO bit not set",
            object
        );
        if MarkCompactSpace::<VM>::test_and_mark(object) {
            queue.enqueue(object);
        }
        object
    }

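    /// Forward-pass trace: return the object's new address from its header forwarding pointer,
    /// enqueueing the object the first time it is visited so its fields are updated exactly once.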
    pub fn trace_forward_object<Q: ObjectQueue>(
        &self,
        queue: &mut Q,
        object: ObjectReference,
    ) -> ObjectReference {
        debug_assert!(
            crate::util::metadata::vo_bit::is_vo_bit_set(object),
            "{:x}: VO bit not set",
            object
        );
        // The mark bit is no longer needed once forwarding starts, so it is cleared here and
        // doubles as the "already visited in this pass" flag.
        if MarkCompactSpace::<VM>::test_and_clear_mark(object) {
            queue.enqueue(object);
        }

        Self::get_header_forwarding_pointer(object)
            .unwrap_or_else(|| panic!("Object {object} does not have a forwarding pointer"))
    }

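    /// Atomically set the mark bit. Returns `true` if this call marked the object, or `false`
    /// if it was already marked.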
    pub fn test_and_mark(object: ObjectReference) -> bool {
        loop {
            let old_value = VM::VMObjectModel::LOCAL_MARK_BIT_SPEC.load_atomic::<VM, u8>(
                object,
                None,
                Ordering::SeqCst,
            );
            let mark_bit = old_value & GC_MARK_BIT_MASK;
            if mark_bit != 0 {
                return false;
            }
            if VM::VMObjectModel::LOCAL_MARK_BIT_SPEC
                .compare_exchange_metadata::<VM, u8>(
                    object,
                    old_value,
                    1,
                    None,
                    Ordering::SeqCst,
                    Ordering::SeqCst,
                )
                .is_ok()
            {
                break;
            }
        }
        true
    }

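    /// Atomically clear the mark bit. Returns `true` if this call cleared the bit, or `false`
    /// if the object was already unmarked.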
    pub fn test_and_clear_mark(object: ObjectReference) -> bool {
        loop {
            let old_value = VM::VMObjectModel::LOCAL_MARK_BIT_SPEC.load_atomic::<VM, u8>(
                object,
                None,
                Ordering::SeqCst,
            );
            let mark_bit = old_value & GC_MARK_BIT_MASK;
            if mark_bit == 0 {
                return false;
            }

            if VM::VMObjectModel::LOCAL_MARK_BIT_SPEC
                .compare_exchange_metadata::<VM, u8>(
                    object,
                    old_value,
                    0,
                    None,
                    Ordering::SeqCst,
                    Ordering::SeqCst,
                )
                .is_ok()
            {
                break;
            }
        }
        true
    }

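    /// Check whether the object's mark bit is set, without modifying it.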
    pub fn is_marked(object: ObjectReference) -> bool {
        let old_value = VM::VMObjectModel::LOCAL_MARK_BIT_SPEC.load_atomic::<VM, u8>(
            object,
            None,
            Ordering::SeqCst,
        );
        let mark_bit = old_value & GC_MARK_BIT_MASK;
        mark_bit != 0
    }

    fn to_be_compacted(object: &ObjectReference) -> bool {
        Self::is_marked(*object)
    }

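    /// Linearly scan `range` and yield every object found there, in address order.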
    fn linear_scan_objects(&self, range: Range<Address>) -> impl Iterator<Item = ObjectReference> {
        crate::util::linear_scan::ObjectIterator::<VM, MarkCompactObjectSize<VM>, true>::new(
            range.start,
            range.end,
        )
    }

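    /// Forwarding-pointer pass: slide a "to" cursor through the allocated regions in address
    /// order and, for every marked object, record its post-compaction address in the header
    /// forwarding-pointer word.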
    pub fn calculate_forwarding_pointer(&self) {
        let mut to_iter = self.pr.iterate_allocated_regions();
        let Some((mut to_cursor, mut to_size)) = to_iter.next() else {
            return;
        };
        let mut to_end = to_cursor + to_size;
        for (from_start, size) in self.pr.iterate_allocated_regions() {
            let from_end = from_start + size;
            for obj in self
                .linear_scan_objects(from_start..from_end)
                .filter(Self::to_be_compacted)
            {
                let copied_size =
                    VM::VMObjectModel::get_size_when_copied(obj) + Self::HEADER_RESERVED_IN_BYTES;
                let align = VM::VMObjectModel::get_align_when_copied(obj);
                let offset = VM::VMObjectModel::get_align_offset_when_copied(obj);
                // Align the cursor to the copy destination's requirements.
                to_cursor = align_allocation_no_fill::<VM>(to_cursor, align, offset);
                // If the object does not fit in the current "to" region, move to the next one.
                if to_cursor + copied_size > to_end {
                    (to_cursor, to_size) = to_iter.next().unwrap();
                    to_end = to_cursor + to_size;
                    to_cursor = align_allocation_no_fill::<VM>(to_cursor, align, offset);
                    debug_assert!(to_cursor + copied_size <= to_end);
                }
                // The object itself starts after the reserved header bytes.
                let new_obj = VM::VMObjectModel::get_reference_when_copied_to(
                    obj,
                    to_cursor + Self::HEADER_RESERVED_IN_BYTES,
                );
                Self::store_header_forwarding_pointer(obj, new_obj);
                trace!(
                    "Calculate forward: {} (size when copied = {}) ~> {} (size = {})",
                    obj,
                    VM::VMObjectModel::get_size_when_copied(obj),
                    to_cursor,
                    copied_size
                );
                to_cursor += copied_size;
            }
        }
    }

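    /// Compaction pass: copy every live object to the address recorded in its header
    /// forwarding pointer, move the VO bit along with it, and finally reset the page
    /// resource's bump cursor to the end of the compacted data.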
    pub fn compact(&self) {
        let mut to = Address::ZERO;
        for (from_start, size) in self.pr.iterate_allocated_regions() {
            let from_end = from_start + size;
            for obj in self.linear_scan_objects(from_start..from_end) {
                let copied_size = VM::VMObjectModel::get_size_when_copied(obj);
                // Clear the VO bit at the old location; it is re-set at the new location below.
                vo_bit::unset_vo_bit(obj);

                let maybe_forwarding_pointer = Self::get_header_forwarding_pointer(obj);
                if let Some(forwarding_pointer) = maybe_forwarding_pointer {
                    trace!("Compact {} to {}", obj, forwarding_pointer);
                    let new_object = forwarding_pointer;
                    // Clear the forwarding slot at the destination so the copied object does
                    // not carry a stale forwarding pointer.
                    Self::clear_header_forwarding_pointer(new_object);

                    trace!(" copy from {} to {}", obj, new_object);
                    let end_of_new_object =
                        VM::VMObjectModel::copy_to(obj, new_object, Address::ZERO);
                    vo_bit::set_vo_bit(new_object);
                    to = new_object.to_object_start::<VM>() + copied_size;
                    debug_assert_eq!(end_of_new_object, to);
                } else {
                    trace!("Skipping dead object {}", obj);
                }
            }
        }

        debug!("Compact end: to = {}", to);

        // Reset the bump cursor so future allocation starts right after the compacted data.
        self.pr.reset_cursor(to);
    }
}

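/// Object-size callback used by the linear scan: defers to the VM's object model for the
/// current size of each object.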
struct MarkCompactObjectSize<VM>(std::marker::PhantomData<VM>);

impl<VM: VMBinding> crate::util::linear_scan::LinearScanObjectSize for MarkCompactObjectSize<VM> {
    fn size(object: ObjectReference) -> usize {
        VM::VMObjectModel::get_current_size(object)
    }
}