mmtk/policy/compressor/compressorspace.rs

use crate::plan::VectorObjectQueue;
use crate::policy::compressor::forwarding;
use crate::policy::gc_work::{TraceKind, TRACE_KIND_TRANSITIVE_PIN};
use crate::policy::largeobjectspace::LargeObjectSpace;
use crate::policy::sft::{GCWorkerMutRef, SFT};
use crate::policy::space::{CommonSpace, Space};
use crate::scheduler::{GCWork, GCWorkScheduler, GCWorker, WorkBucketStage};
use crate::util::copy::CopySemantics;
use crate::util::heap::regionpageresource::AllocatedRegion;
use crate::util::heap::{PageResource, RegionPageResource};
use crate::util::linear_scan::Region;
use crate::util::metadata::extract_side_metadata;
#[cfg(feature = "vo_bit")]
use crate::util::metadata::vo_bit;
use crate::util::metadata::MetadataSpec;
use crate::util::object_enum::{self, ObjectEnumerator};
use crate::util::{Address, ObjectReference};
use crate::vm::slot::Slot;
use crate::MMTK;
use crate::{vm::*, ObjectQueue};
use atomic::Ordering;
use std::sync::Arc;

pub(crate) const TRACE_KIND_MARK: TraceKind = 0;
pub(crate) const TRACE_KIND_FORWARD_ROOT: TraceKind = 1;

/// [`CompressorSpace`] is a stop-the-world implementation of
/// the Compressor, as described in Kermany and Petrank,
/// [The Compressor: concurrent, incremental, and parallel compaction](https://dl.acm.org/doi/10.1145/1133255.1134023).
///
/// [`CompressorSpace`] makes two main departures from the paper
/// (aside from [`CompressorSpace`] being stop-the-world):
/// - The heap is structured into regions ([`forwarding::CompressorRegion`])
///   which the collector compacts separately.
/// - The collector compacts each region in place, instead of using two virtual
///   spaces as in Kermany and Petrank. The virtual spaces side-step a race which
///   would occur if multiple threads attempted to compact one heap in place: one thread
///   could move an object into the location of another object which has yet to be moved
///   by another thread. Kermany and Petrank move objects between from- and to-virtual
///   spaces, preventing the old objects from being overwritten. (They reclaim memory by
///   unmapping pages of the from-virtual space after moving all objects out of those pages.)
///   We instead side-step this race by assigning only a single thread to each region, and
///   running multiple single-threaded Compressors at once.
pub struct CompressorSpace<VM: VMBinding> {
    common: CommonSpace<VM>,
    pr: RegionPageResource<VM, forwarding::CompressorRegion>,
    forwarding: forwarding::ForwardingMetadata<VM>,
    scheduler: Arc<GCWorkScheduler<VM>>,
}

impl<VM: VMBinding> SFT for CompressorSpace<VM> {
    fn name(&self) -> &'static str {
        self.get_name()
    }

    fn get_forwarded_object(&self, object: ObjectReference) -> Option<ObjectReference> {
        // Check if forwarding addresses have been calculated before attempting
        // to forward objects
        if self.forwarding.has_calculated_forwarding_addresses() {
            Some(self.forward(object, false))
        } else {
            None
        }
    }

    fn is_live(&self, object: ObjectReference) -> bool {
        Self::is_marked(object)
    }

    #[cfg(feature = "object_pinning")]
    fn pin_object(&self, _object: ObjectReference) -> bool {
        panic!("Cannot pin/unpin objects of CompressorSpace.")
    }

    #[cfg(feature = "object_pinning")]
    fn unpin_object(&self, _object: ObjectReference) -> bool {
        panic!("Cannot pin/unpin objects of CompressorSpace.")
    }

    #[cfg(feature = "object_pinning")]
    fn is_object_pinned(&self, _object: ObjectReference) -> bool {
        false
    }

    fn is_movable(&self) -> bool {
        true
    }

    fn initialize_object_metadata(&self, _object: ObjectReference) {
        #[cfg(feature = "vo_bit")]
        crate::util::metadata::vo_bit::set_vo_bit(_object);
    }

    #[cfg(feature = "sanity")]
    fn is_sane(&self) -> bool {
        true
    }

    #[cfg(feature = "is_mmtk_object")]
    fn is_mmtk_object(&self, addr: Address) -> Option<ObjectReference> {
        crate::util::metadata::vo_bit::is_vo_bit_set_for_addr(addr)
    }

    #[cfg(feature = "is_mmtk_object")]
    fn find_object_from_internal_pointer(
        &self,
        ptr: Address,
        max_search_bytes: usize,
    ) -> Option<ObjectReference> {
        crate::util::metadata::vo_bit::find_object_from_internal_pointer::<VM>(
            ptr,
            max_search_bytes,
        )
    }

    fn sft_trace_object(
        &self,
        _queue: &mut VectorObjectQueue,
        _object: ObjectReference,
        _worker: GCWorkerMutRef,
    ) -> ObjectReference {
        // We should not use sft_trace_object for the Compressor space.
        // Depending on which trace it is, callers must use either
        // trace_mark_object or trace_forward_root instead.
        panic!("sft_trace_object() cannot be used with CompressorSpace")
    }

    fn debug_print_object_info(&self, object: ObjectReference) {
        println!("marked = {}", CompressorSpace::<VM>::is_marked(object));
        println!("forwarding = {:?}", self.get_forwarded_object(object));
        self.common.debug_print_object_global_info(object);
    }
}

impl<VM: VMBinding> Space<VM> for CompressorSpace<VM> {
    fn as_space(&self) -> &dyn Space<VM> {
        self
    }

    fn as_sft(&self) -> &(dyn SFT + Sync + 'static) {
        self
    }

    fn get_page_resource(&self) -> &dyn PageResource<VM> {
        &self.pr
    }

    fn maybe_get_page_resource_mut(&mut self) -> Option<&mut dyn PageResource<VM>> {
        Some(&mut self.pr)
    }

    fn common(&self) -> &CommonSpace<VM> {
        &self.common
    }

    fn initialize_sft(&self, sft_map: &mut dyn crate::policy::sft_map::SFTMap) {
        self.common().initialize_sft(self.as_sft(), sft_map)
    }

    fn release_multiple_pages(&mut self, _start: Address) {
        panic!("CompressorSpace only releases pages en masse")
    }

    fn enumerate_objects(&self, enumerator: &mut dyn ObjectEnumerator) {
        self.pr.enumerate(enumerator);
    }

    fn clear_side_log_bits(&self) {
        unimplemented!()
    }

    fn set_side_log_bits(&self) {
        unimplemented!()
    }
}

impl<VM: VMBinding> crate::policy::gc_work::PolicyTraceObject<VM> for CompressorSpace<VM> {
    fn trace_object<Q: ObjectQueue, const KIND: crate::policy::gc_work::TraceKind>(
        &self,
        queue: &mut Q,
        object: ObjectReference,
        _copy: Option<CopySemantics>,
        _worker: &mut GCWorker<VM>,
    ) -> ObjectReference {
        debug_assert!(
            KIND != TRACE_KIND_TRANSITIVE_PIN,
            "Compressor does not support transitive pin trace."
        );
        if KIND == TRACE_KIND_MARK {
            self.trace_mark_object(queue, object)
        } else if KIND == TRACE_KIND_FORWARD_ROOT {
            self.trace_forward_root(queue, object)
        } else {
            unreachable!()
        }
    }

    fn may_move_objects<const KIND: crate::policy::gc_work::TraceKind>() -> bool {
        if KIND == TRACE_KIND_MARK {
            false
        } else if KIND == TRACE_KIND_FORWARD_ROOT {
            true
        } else {
            unreachable!()
        }
    }
}

impl<VM: VMBinding> CompressorSpace<VM> {
    pub fn new(args: crate::policy::space::PlanCreateSpaceArgs<VM>) -> Self {
        let vm_map = args.vm_map;
        assert!(
            VM::VMObjectModel::UNIFIED_OBJECT_REFERENCE_ADDRESS,
            "The Compressor requires a unified object reference address model"
        );
        let local_specs = extract_side_metadata(&[
            MetadataSpec::OnSide(forwarding::MARK_SPEC),
            MetadataSpec::OnSide(forwarding::OFFSET_VECTOR_SPEC),
        ]);
        let is_discontiguous = args.vmrequest.is_discontiguous();
        let scheduler = args.scheduler.clone();
        let common = CommonSpace::new(args.into_policy_args(true, false, local_specs));
        CompressorSpace {
            pr: if is_discontiguous {
                RegionPageResource::new_discontiguous(vm_map)
            } else {
                RegionPageResource::new_contiguous(common.start, common.extent, vm_map)
            },
            forwarding: forwarding::ForwardingMetadata::new(),
            common,
            scheduler,
        }
    }

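    /// Prepare for a collection by zeroing the mark bits of every allocated region.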
    pub fn prepare(&self) {
        self.pr
            .enumerate_regions(&mut |r: &AllocatedRegion<forwarding::CompressorRegion>| {
                forwarding::MARK_SPEC
                    .bzero_metadata(r.region.start(), r.region.end() - r.region.start());
            });
    }

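    /// Release the per-collection state held by the forwarding metadata.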
    pub fn release(&self) {
        self.forwarding.release();
    }

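    /// Mark `object` during the marking trace. The first thread to mark the
    /// object enqueues it for scanning and marks the last word of the object,
    /// which records its extent for the later offset vector calculation.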
    pub fn trace_mark_object<Q: ObjectQueue>(
        &self,
        queue: &mut Q,
        object: ObjectReference,
    ) -> ObjectReference {
        #[cfg(feature = "vo_bit")]
        debug_assert!(
            crate::util::metadata::vo_bit::is_vo_bit_set(object),
            "{:x}: VO bit not set",
            object
        );
        if CompressorSpace::<VM>::test_and_mark(object) {
            queue.enqueue(object);
            self.forwarding.mark_last_word_of_object(object);
        }
        object
    }

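    /// Forward a reference from the roots. The queue is unused: heap
    /// references are not rewritten by retracing, but by
    /// [`CompressorSpace::update_references`] during compaction.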
    pub fn trace_forward_root<Q: ObjectQueue>(
        &self,
        _queue: &mut Q,
        object: ObjectReference,
    ) -> ObjectReference {
        self.forward(object, true)
    }

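    /// Atomically attempt to set the mark bit of `object`, returning `true`
    /// if this call changed the bit from unmarked to marked.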
    pub fn test_and_mark(object: ObjectReference) -> bool {
        forwarding::MARK_SPEC
            .fetch_update_atomic::<u8, _>(
                object.to_raw_address(),
                Ordering::SeqCst,
                Ordering::Relaxed,
                |v| {
                    if v == 0 {
                        Some(1)
                    } else {
                        None
                    }
                },
            )
            .is_ok()
    }

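    /// Return `true` if the mark bit of `object` is set.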
    pub fn is_marked(object: ObjectReference) -> bool {
        let mark_bit =
            forwarding::MARK_SPEC.load_atomic::<u8>(object.to_raw_address(), Ordering::SeqCst);
        mark_bit == 1
    }

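    /// Generate one work packet per allocated region, calling `f` with each
    /// region and its index to construct the packet.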
    fn generate_tasks(
        &self,
        f: &mut impl FnMut(&AllocatedRegion<forwarding::CompressorRegion>, usize) -> Box<dyn GCWork<VM>>,
    ) -> Vec<Box<dyn GCWork<VM>>> {
        let mut packets = vec![];
        let mut index = 0;
        self.pr.enumerate_regions(&mut |r| {
            packets.push(f(r, index));
            index += 1;
        });
        packets
    }

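    /// Schedule a [`CalculateOffsetVector`] packet for each region in the
    /// `CalculateForwarding` work bucket.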
    pub fn add_offset_vector_tasks(&'static self) {
        let offset_vector_packets: Vec<Box<dyn GCWork<VM>>> = self.generate_tasks(&mut |r, _| {
            Box::new(CalculateOffsetVector::<VM>::new(self, r.region, r.cursor()))
        });
        self.scheduler.work_buckets[WorkBucketStage::CalculateForwarding]
            .bulk_add(offset_vector_packets);
    }

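    /// Calculate the offset vector for a single region, covering the range
    /// from the region start up to its allocation cursor.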
    pub fn calculate_offset_vector_for_region(
        &self,
        region: forwarding::CompressorRegion,
        cursor: Address,
    ) {
        self.forwarding.calculate_offset_vector(region, cursor);
    }

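    /// Compute the forwarded address of `object`, returning `object`
    /// unchanged if it does not reside in this space. `_vo_bit_valid`
    /// indicates whether the caller may assert the VO bit of `object`; see
    /// the comment in the body for why this is only safe for roots.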
    pub fn forward(&self, object: ObjectReference, _vo_bit_valid: bool) -> ObjectReference {
        if !self.in_space(object) {
            return object;
        }
        // We can't expect the VO bit to be valid whilst compacting the heap.
        // If we are fixing a reference inside an object which was moved before its
        // referent, the VO bit of the referent will have been cleared, and this
        // assertion would fail.
        // Thus we can only ever expect the VO bit to be valid whilst fixing the roots.
        #[cfg(feature = "vo_bit")]
        if _vo_bit_valid {
            debug_assert!(
                crate::util::metadata::vo_bit::is_vo_bit_set(object),
                "{:x}: VO bit not set",
                object
            );
        }
        ObjectReference::from_raw_address(self.forwarding.forward(object.to_raw_address())).unwrap()
    }

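    /// Rewrite every reference stored in `object` to point to the forwarded
    /// copy of its referent, using slot enqueuing when the binding supports
    /// it and edge tracing otherwise.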
    fn update_references(&self, worker: &mut GCWorker<VM>, object: ObjectReference) {
        if VM::VMScanning::support_slot_enqueuing(worker.tls, object) {
            VM::VMScanning::scan_object(worker.tls, object, &mut |s: VM::VMSlot| {
                if let Some(o) = s.load() {
                    s.store(self.forward(o, false));
                }
            });
        } else {
            VM::VMScanning::scan_object_and_trace_edges(worker.tls, object, &mut |o| {
                self.forward(o, false)
            });
        }
    }

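    /// Schedule a [`Compact`] packet for each region in the `Compact` work
    /// bucket.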
    pub fn add_compact_tasks(&'static self) {
        let compact_packets: Vec<Box<dyn GCWork<VM>>> =
            self.generate_tasks(&mut |_, i| Box::new(Compact::<VM>::new(self, i)));
        self.scheduler.work_buckets[WorkBucketStage::Compact].bulk_add(compact_packets);
    }

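    /// Compact the region at `index` in place: slide each marked object to
    /// its forwarding address, restore its VO bit, update the references it
    /// holds, and finally reset the region's cursor to the end of the last
    /// copied object.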
    pub fn compact_region(&self, worker: &mut GCWorker<VM>, index: usize) {
        self.pr.with_regions(&mut |regions| {
            let r = &regions[index];
            let start = r.region.start();
            let end = r.cursor();
            #[cfg(feature = "vo_bit")]
            {
                #[cfg(debug_assertions)]
                self.forwarding
                    .scan_marked_objects(start, end, &mut |object: ObjectReference| {
                        debug_assert!(
                            crate::util::metadata::vo_bit::is_vo_bit_set(object),
                            "{:x}: VO bit not set",
                            object
                        );
                    });
                crate::util::metadata::vo_bit::bzero_vo_bit(start, end - start);
            }
            let mut to = start;
            self.forwarding
                .scan_marked_objects(start, end, &mut |obj: ObjectReference| {
                    // We set the end bits based on the sizes of objects when they are
                    // marked, and we compute the live data and thus the forwarding
                    // addresses based on those sizes. The forwarding addresses would be
                    // incorrect if the sizes of objects were to change.
                    let copied_size = VM::VMObjectModel::get_size_when_copied(obj);
                    debug_assert!(copied_size == VM::VMObjectModel::get_current_size(obj));
                    let new_object = self.forward(obj, false);
                    debug_assert!(
                        new_object.to_raw_address() >= to,
                        "whilst forwarding {obj}, the new address {0} should not be before the end of the previous object {to}",
                        new_object.to_raw_address()
                    );
                    // copy object
                    trace!(" copy from {} to {}", obj, new_object);
                    let end_of_new_object =
                        VM::VMObjectModel::copy_to(obj, new_object, Address::ZERO);
                    // update VO bit
                    #[cfg(feature = "vo_bit")]
                    vo_bit::set_vo_bit(new_object);
                    to = new_object.to_object_start::<VM>() + copied_size;
                    debug_assert_eq!(end_of_new_object, to);
                    self.update_references(worker, new_object);
                });
            self.pr.reset_cursor(r, to);
        });
    }

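    /// Perform post-compaction work: reset the allocator and update
    /// references held by large objects, which may point into the
    /// Compressor space.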
    pub fn after_compact(&self, worker: &mut GCWorker<VM>, los: &LargeObjectSpace<VM>) {
        self.pr.reset_allocator();
        // Update references from the LOS to Compressor too.
        los.enumerate_to_space_objects(&mut object_enum::ClosureObjectEnumerator::<_, VM>::new(
            &mut |o: ObjectReference| {
                self.update_references(worker, o);
            },
        ));
    }
}

/// Calculate the offset vector for a region.
pub struct CalculateOffsetVector<VM: VMBinding> {
    compressor_space: &'static CompressorSpace<VM>,
    region: forwarding::CompressorRegion,
    cursor: Address,
}

impl<VM: VMBinding> GCWork<VM> for CalculateOffsetVector<VM> {
    fn do_work(&mut self, _worker: &mut GCWorker<VM>, _mmtk: &'static MMTK<VM>) {
        self.compressor_space
            .calculate_offset_vector_for_region(self.region, self.cursor);
    }
}

impl<VM: VMBinding> CalculateOffsetVector<VM> {
    pub fn new(
        compressor_space: &'static CompressorSpace<VM>,
        region: forwarding::CompressorRegion,
        cursor: Address,
    ) -> Self {
        Self {
            compressor_space,
            region,
            cursor,
        }
    }
}

/// Compact live objects in a region.
pub struct Compact<VM: VMBinding> {
    compressor_space: &'static CompressorSpace<VM>,
    index: usize,
}

impl<VM: VMBinding> GCWork<VM> for Compact<VM> {
    fn do_work(&mut self, worker: &mut GCWorker<VM>, _mmtk: &'static MMTK<VM>) {
        self.compressor_space.compact_region(worker, self.index);
    }
}

impl<VM: VMBinding> Compact<VM> {
    pub fn new(compressor_space: &'static CompressorSpace<VM>, index: usize) -> Self {
        Self {
            compressor_space,
            index,
        }
    }
}