mmtk/policy/immortalspace.rs

use atomic::Ordering;

use crate::policy::sft::SFT;
use crate::policy::space::{CommonSpace, Space};
use crate::util::address::Address;
use crate::util::heap::{MonotonePageResource, PageResource};
use crate::util::metadata::mark_bit::MarkState;

use crate::util::object_enum::{self, ObjectEnumerator};
use crate::util::{metadata, ObjectReference};

use crate::plan::{ObjectQueue, VectorObjectQueue};

use crate::policy::sft::GCWorkerMutRef;
use crate::vm::{ObjectModel, VMBinding};
/// This type implements a simple immortal collection policy. Under this
/// policy, all the "collector" has to do is propagate marks in a liveness
/// trace. It never actually reclaims any objects.
pub struct ImmortalSpace<VM: VMBinding> {
    /// The mark state used by the liveness trace.
    mark_state: MarkState,
    /// Common space state shared across policies.
    common: CommonSpace<VM>,
    /// Monotone (bump-pointer) page resource; pages are only released en masse.
    pr: MonotonePageResource<VM>,
}

impl<VM: VMBinding> SFT for ImmortalSpace<VM> {
    fn name(&self) -> &'static str {
        self.get_name()
    }
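    // Immortal objects are never reclaimed, so every object is trivially live.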
    fn is_live(&self, _object: ObjectReference) -> bool {
        true
    }
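    // Reachability is a separate question from liveness here: an object is
    // "reachable" only if the current trace has marked it.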
    fn is_reachable(&self, object: ObjectReference) -> bool {
        self.mark_state.is_marked::<VM>(object)
    }
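    // Objects in this space never move, so pin/unpin are no-ops (returning
    // false: the call did not change the pinning state) and every object is
    // reported as pinned.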
    #[cfg(feature = "object_pinning")]
    fn pin_object(&self, _object: ObjectReference) -> bool {
        false
    }
    #[cfg(feature = "object_pinning")]
    fn unpin_object(&self, _object: ObjectReference) -> bool {
        false
    }
    #[cfg(feature = "object_pinning")]
    fn is_object_pinned(&self, _object: ObjectReference) -> bool {
        true
    }
    fn is_movable(&self) -> bool {
        false
    }
    #[cfg(feature = "sanity")]
    fn is_sane(&self) -> bool {
        true
    }
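    // Set up per-object metadata at allocation time: the mark bit, the global
    // log bit (if the plan requires allocated objects to start unlogged), and
    // the valid-object (VO) bit when that feature is enabled.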
    fn initialize_object_metadata(&self, object: ObjectReference) {
        self.mark_state
            .on_object_metadata_initialization::<VM>(object);
        if self.common.unlog_allocated_object {
            VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.mark_as_unlogged::<VM>(object, Ordering::SeqCst);
        }
        #[cfg(feature = "vo_bit")]
        crate::util::metadata::vo_bit::set_vo_bit(object);
    }
    #[cfg(feature = "is_mmtk_object")]
    fn is_mmtk_object(&self, addr: Address) -> Option<ObjectReference> {
        crate::util::metadata::vo_bit::is_vo_bit_set_for_addr(addr)
    }
    #[cfg(feature = "is_mmtk_object")]
    fn find_object_from_internal_pointer(
        &self,
        ptr: Address,
        max_search_bytes: usize,
    ) -> Option<ObjectReference> {
        crate::util::metadata::vo_bit::find_object_from_internal_pointer::<VM>(
            ptr,
            max_search_bytes,
        )
    }
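    // The SFT entry point for tracing simply delegates to the policy's own
    // mark-based trace_object below.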
    fn sft_trace_object(
        &self,
        queue: &mut VectorObjectQueue,
        object: ObjectReference,
        _worker: GCWorkerMutRef,
    ) -> ObjectReference {
        self.trace_object(queue, object)
    }
    fn debug_print_object_info(&self, object: ObjectReference) {
        println!("marked = {}", self.mark_state.is_marked::<VM>(object));
        self.common.debug_print_object_global_info(object);
    }
}

impl<VM: VMBinding> Space<VM> for ImmortalSpace<VM> {
    fn as_space(&self) -> &dyn Space<VM> {
        self
    }
    fn as_sft(&self) -> &(dyn SFT + Sync + 'static) {
        self
    }
    fn get_page_resource(&self) -> &dyn PageResource<VM> {
        &self.pr
    }
    fn maybe_get_page_resource_mut(&mut self) -> Option<&mut dyn PageResource<VM>> {
        Some(&mut self.pr)
    }
    fn common(&self) -> &CommonSpace<VM> {
        &self.common
    }

    fn initialize_sft(&self, sft_map: &mut dyn crate::policy::sft_map::SFTMap) {
        self.common().initialize_sft(self.as_sft(), sft_map)
    }

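    // The monotone page resource cannot return individual pages; this space
    // only ever releases its pages en masse, hence the panic.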
    fn release_multiple_pages(&mut self, _start: Address) {
        panic!("immortalspace only releases pages en masse")
    }

    fn enumerate_objects(&self, enumerator: &mut dyn ObjectEnumerator) {
        object_enum::enumerate_blocks_from_monotonic_page_resource(enumerator, &self.pr);
    }

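    // Bulk-clear the side log bits for every region this space has allocated.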
    fn clear_side_log_bits(&self) {
        let log_bit = VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.extract_side_spec();
        for (start, size) in self.pr.iterate_allocated_regions() {
            log_bit.bzero_metadata(start, size);
        }
    }

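    // Bulk-set the side log bits for every region this space has allocated.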
    fn set_side_log_bits(&self) {
        let log_bit = VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.extract_side_spec();
        for (start, size) in self.pr.iterate_allocated_regions() {
            log_bit.bset_metadata(start, size);
        }
    }
}

use crate::scheduler::GCWorker;
use crate::util::copy::CopySemantics;

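// The plan-facing trace entry point. The copy semantics argument is ignored
// because this policy never moves objects, as may_move_objects also advertises.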
impl<VM: VMBinding> crate::policy::gc_work::PolicyTraceObject<VM> for ImmortalSpace<VM> {
    fn trace_object<Q: ObjectQueue, const KIND: crate::policy::gc_work::TraceKind>(
        &self,
        queue: &mut Q,
        object: ObjectReference,
        _copy: Option<CopySemantics>,
        _worker: &mut GCWorker<VM>,
    ) -> ObjectReference {
        self.trace_object(queue, object)
    }
    fn may_move_objects<const KIND: crate::policy::gc_work::TraceKind>() -> bool {
        false
    }
}

impl<VM: VMBinding> ImmortalSpace<VM> {
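    /// Create a new immortal space. The space is immovable and registers the
    /// binding's local mark-bit spec as side metadata; its page resource is
    /// contiguous or discontiguous depending on the VM request.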
    pub fn new(args: crate::policy::space::PlanCreateSpaceArgs<VM>) -> Self {
        let vm_map = args.vm_map;
        let is_discontiguous = args.vmrequest.is_discontiguous();
        let common = CommonSpace::new(args.into_policy_args(
            false,
            true,
            metadata::extract_side_metadata(&[*VM::VMObjectModel::LOCAL_MARK_BIT_SPEC]),
        ));
        ImmortalSpace {
            mark_state: MarkState::new(),
            pr: if is_discontiguous {
                MonotonePageResource::new_discontiguous(vm_map)
            } else {
                MonotonePageResource::new_contiguous(common.start, common.extent, vm_map)
            },
            common,
        }
    }

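    /// Prepare for a GC: update the global mark state and reset the mark bits
    /// of all allocated regions so the upcoming trace starts from a clean slate.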
    pub fn prepare(&mut self) {
        self.mark_state.on_global_prepare::<VM>();
        // Reset the mark bit for the allocated regions.
        for (addr, size) in self.pr.iterate_allocated_regions() {
            debug!(
                "{:?}: reset mark bit from {} to {}",
                self.name(),
                addr,
                addr + size
            );
            self.mark_state.on_block_reset::<VM>(addr, size);
        }
    }

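    /// Release after a GC. Nothing is reclaimed; only the mark state is updated.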
    pub fn release(&mut self) {
        self.mark_state.on_global_release::<VM>();
    }

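    /// Trace an object: if this call is the first to mark it in this trace,
    /// optionally set its unlog bit and enqueue it so its children get scanned.
    /// The object is returned unchanged, since immortal objects never move.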
    pub fn trace_object<Q: ObjectQueue>(
        &self,
        queue: &mut Q,
        object: ObjectReference,
    ) -> ObjectReference {
        #[cfg(feature = "vo_bit")]
        debug_assert!(
            crate::util::metadata::vo_bit::is_vo_bit_set(object),
            "{:x}: VO bit not set",
            object
        );
        if self.mark_state.test_and_mark::<VM>(object) {
            // Set the unlog bit if required
            if self.common.unlog_traced_object {
                VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.store_atomic::<VM, u8>(
                    object,
                    1,
                    None,
                    Ordering::SeqCst,
                );
            }
            queue.enqueue(object);
        }
        object
    }
}