// mmtk/policy/lockfreeimmortalspace.rs

use atomic::Atomic;

use std::sync::atomic::Ordering;
use std::sync::Arc;

use crate::policy::sft::GCWorkerMutRef;
use crate::policy::sft::SFT;
use crate::policy::space::{CommonSpace, Space};
use crate::util::address::Address;

use crate::util::alloc::allocator::AllocationOptions;
use crate::util::conversions;
use crate::util::heap::gc_trigger::GCTrigger;
use crate::util::heap::layout::vm_layout::vm_layout;
use crate::util::heap::PageResource;
use crate::util::heap::VMRequest;
use crate::util::memory::MmapAnnotation;
use crate::util::memory::MmapStrategy;
use crate::util::metadata::side_metadata::SideMetadataContext;
use crate::util::metadata::side_metadata::SideMetadataSanity;
use crate::util::object_enum::ObjectEnumerator;
use crate::util::opaque_pointer::*;
use crate::util::ObjectReference;
use crate::vm::VMBinding;

/// This type implements a lock-free version of the immortal collection
/// policy. It is close to OpenJDK's Epsilon GC.
/// Unlike the normal ImmortalSpace, this version should only be used
/// by the NoGC plan, and it currently uses the whole heap range.
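///
/// Allocation simply bumps the shared atomic `cursor` with a `fetch_update`;
/// there is no lock, and the space never releases memory.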
// FIXME: It is wrong that the space uses the whole heap range. It has to reserve its own
// range from HeapMeta, and not clash with other spaces.
pub struct LockFreeImmortalSpace<VM: VMBinding> {
    #[allow(unused)]
    name: &'static str,
    /// Allocation cursor, initialized to the start of the heap range
    cursor: Atomic<Address>,
    /// Heap range end
    limit: Address,
    /// Start of this space
    start: Address,
    /// Total bytes for the space
    total_bytes: usize,
    /// Zero memory after slow-path allocation
    slow_path_zeroing: bool,
    metadata: SideMetadataContext,
    gc_trigger: Arc<GCTrigger<VM>>,
}

impl<VM: VMBinding> SFT for LockFreeImmortalSpace<VM> {
    fn name(&self) -> &'static str {
        self.get_name()
    }
    fn is_live(&self, _object: ObjectReference) -> bool {
        unimplemented!()
    }
    #[cfg(feature = "object_pinning")]
    fn pin_object(&self, _object: ObjectReference) -> bool {
        false
    }
    #[cfg(feature = "object_pinning")]
    fn unpin_object(&self, _object: ObjectReference) -> bool {
        false
    }
    #[cfg(feature = "object_pinning")]
    fn is_object_pinned(&self, _object: ObjectReference) -> bool {
        true
    }
    fn is_movable(&self) -> bool {
        unimplemented!()
    }
    #[cfg(feature = "sanity")]
    fn is_sane(&self) -> bool {
        unimplemented!()
    }
    fn initialize_object_metadata(&self, _object: ObjectReference) {
        #[cfg(feature = "vo_bit")]
        crate::util::metadata::vo_bit::set_vo_bit(_object);
    }
    #[cfg(feature = "is_mmtk_object")]
    fn is_mmtk_object(&self, addr: Address) -> Option<ObjectReference> {
        crate::util::metadata::vo_bit::is_vo_bit_set_for_addr(addr)
    }
    #[cfg(feature = "is_mmtk_object")]
    fn find_object_from_internal_pointer(
        &self,
        ptr: Address,
        max_search_bytes: usize,
    ) -> Option<ObjectReference> {
        crate::util::metadata::vo_bit::find_object_from_internal_pointer::<VM>(
            ptr,
            max_search_bytes,
        )
    }
    fn sft_trace_object(
        &self,
        _queue: &mut VectorObjectQueue,
        _object: ObjectReference,
        _worker: GCWorkerMutRef,
    ) -> ObjectReference {
        unreachable!()
    }
}

impl<VM: VMBinding> Space<VM> for LockFreeImmortalSpace<VM> {
    fn as_space(&self) -> &dyn Space<VM> {
        self
    }
    fn as_sft(&self) -> &(dyn SFT + Sync + 'static) {
        self
    }
    fn get_page_resource(&self) -> &dyn PageResource<VM> {
        unimplemented!()
    }
    fn maybe_get_page_resource_mut(&mut self) -> Option<&mut dyn PageResource<VM>> {
        None
    }
    fn common(&self) -> &CommonSpace<VM> {
        unimplemented!()
    }

    fn get_gc_trigger(&self) -> &GCTrigger<VM> {
        &self.gc_trigger
    }

    fn release_multiple_pages(&mut self, _start: Address) {
        panic!("immortalspace only releases pages en masse")
    }

    fn initialize_sft(&self, sft_map: &mut dyn crate::policy::sft_map::SFTMap) {
        unsafe { sft_map.eager_initialize(self.as_sft(), self.start, self.total_bytes) };
    }

    fn estimate_side_meta_pages(&self, data_pages: usize) -> usize {
        self.metadata.calculate_reserved_pages(data_pages)
    }

    fn reserved_pages(&self) -> usize {
        let cursor = self.cursor.load(Ordering::Relaxed);
        let data_pages = conversions::bytes_to_pages_up(self.limit - cursor);
        let meta_pages = self.estimate_side_meta_pages(data_pages);
        data_pages + meta_pages
    }

    fn acquire(&self, _tls: VMThread, pages: usize, alloc_options: AllocationOptions) -> Address {
        trace!("LockFreeImmortalSpace::acquire");
        let bytes = conversions::pages_to_bytes(pages);
        // Atomically bump the cursor by the requested number of bytes.
        let start = self
            .cursor
            .fetch_update(Ordering::Relaxed, Ordering::Relaxed, |addr| {
                Some(addr.add(bytes))
            })
            .expect("update cursor failed");
        // Note that the cursor has already been bumped; it is not rolled back
        // if the allocation turns out to exceed the limit.
        if start + bytes > self.limit {
            if alloc_options.allow_oom_call {
                panic!("OutOfMemory");
            } else {
                return Address::ZERO;
            }
        }
        if self.slow_path_zeroing {
            crate::util::memory::zero(start, bytes);
        }
        start
    }

    /// Get the name of the space
    ///
    /// We have to override the default implementation because
    /// LockFreeImmortalSpace doesn't have a common space
    fn get_name(&self) -> &'static str {
        "LockFreeImmortalSpace"
    }

    /// We have to override the default implementation because
    /// LockFreeImmortalSpace doesn't put metadata in a common space
    fn verify_side_metadata_sanity(&self, side_metadata_sanity_checker: &mut SideMetadataSanity) {
        side_metadata_sanity_checker
            .verify_metadata_context(std::any::type_name::<Self>(), &self.metadata)
    }

    fn enumerate_objects(&self, enumerator: &mut dyn ObjectEnumerator) {
        enumerator.visit_address_range(self.start, self.start + self.total_bytes);
    }

    fn clear_side_log_bits(&self) {
        unimplemented!()
    }

    fn set_side_log_bits(&self) {
        unimplemented!()
    }
}

use crate::plan::{ObjectQueue, VectorObjectQueue};
use crate::scheduler::GCWorker;
use crate::util::copy::CopySemantics;

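// This space is only used by the NoGC plan, which never triggers a collection,
// so the tracing entry points below are never reached.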
impl<VM: VMBinding> crate::policy::gc_work::PolicyTraceObject<VM> for LockFreeImmortalSpace<VM> {
    fn trace_object<Q: ObjectQueue, const KIND: crate::policy::gc_work::TraceKind>(
        &self,
        _queue: &mut Q,
        _object: ObjectReference,
        _copy: Option<CopySemantics>,
        _worker: &mut GCWorker<VM>,
    ) -> ObjectReference {
        unreachable!()
    }
    fn may_move_objects<const KIND: crate::policy::gc_work::TraceKind>() -> bool {
        unreachable!()
    }
}

impl<VM: VMBinding> LockFreeImmortalSpace<VM> {
    /// Create the space. The heap size is taken from the fixed-heap-size GC
    /// trigger option, rounded up to whole chunks; the whole range is reserved
    /// and eagerly memory-mapped here.
    #[allow(dead_code)] // Only used with certain features.
    pub fn new(args: crate::policy::space::PlanCreateSpaceArgs<VM>) -> Self {
        let slow_path_zeroing = args.zeroed;

        // Get the total bytes for the heap.
        let total_bytes = match *args.options.gc_trigger {
            crate::util::options::GCTriggerSelector::FixedHeapSize(bytes) => bytes,
            _ => unimplemented!(),
        };
        assert!(
            total_bytes <= vm_layout().available_bytes(),
            "Initial requested memory ({} bytes) overflows the heap. Max heap size is {} bytes.",
            total_bytes,
            vm_layout().available_bytes()
        );
        // Align up to chunks
        let aligned_total_bytes = crate::util::conversions::raw_align_up(
            total_bytes,
            crate::util::heap::vm_layout::BYTES_IN_CHUNK,
        );

        // Create a VM request of fixed size
        let vmrequest = VMRequest::fixed_size(aligned_total_bytes);
        // Reserve the space
        let VMRequest::Extent { extent, top } = vmrequest else {
            unreachable!()
        };
        let start = args.heap.reserve(extent, top);

        let space = Self {
            name: args.name,
            cursor: Atomic::new(start),
            limit: start + aligned_total_bytes,
            start,
            total_bytes: aligned_total_bytes,
            slow_path_zeroing,
            metadata: SideMetadataContext {
                global: args.global_side_metadata_specs,
                local: vec![],
            },
            gc_trigger: args.gc_trigger,
        };

        // Eagerly memory map the entire heap (also zero all the memory)
        let strategy = MmapStrategy::new(
            *args.options.transparent_hugepages,
            crate::util::memory::MmapProtection::ReadWrite,
        );
        crate::util::memory::dzmmap_noreplace(
            start,
            aligned_total_bytes,
            strategy,
            &MmapAnnotation::Space {
                name: space.get_name(),
            },
        )
        .unwrap();
        space
            .metadata
            .try_map_metadata_space(start, aligned_total_bytes, space.get_name())
            .unwrap_or_else(|e| {
                // TODO(Javad): handle meta space allocation failure
                panic!("failed to mmap meta memory: {e}")
            });

        space
    }
}
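
// The allocation scheme above is just a lock-free bump pointer over a single
// contiguous range. The following test module is an illustrative sketch of
// that pattern only: it models the cursor with a plain `AtomicUsize` instead
// of MMTk's `Atomic<Address>`, and the names used here (`BumpCursor`, `alloc`)
// are hypothetical rather than part of the MMTk API.
#[cfg(test)]
mod bump_cursor_sketch {
    use std::sync::atomic::{AtomicUsize, Ordering};

    /// A toy bump allocator over the byte range `[start, limit)`.
    struct BumpCursor {
        cursor: AtomicUsize,
        limit: usize,
    }

    impl BumpCursor {
        fn new(start: usize, limit: usize) -> Self {
            Self {
                cursor: AtomicUsize::new(start),
                limit,
            }
        }

        /// Bump the cursor by `bytes` and return the old value, or `None` if
        /// the allocation would run past `limit`. As in `acquire` above, the
        /// cursor is advanced first and the bounds check happens afterwards,
        /// so a failed allocation does not roll the cursor back.
        fn alloc(&self, bytes: usize) -> Option<usize> {
            let old = self
                .cursor
                .fetch_update(Ordering::Relaxed, Ordering::Relaxed, |addr| {
                    Some(addr + bytes)
                })
                .expect("update cursor failed");
            if old + bytes > self.limit {
                None
            } else {
                Some(old)
            }
        }
    }

    #[test]
    fn bump_until_exhausted() {
        let space = BumpCursor::new(0x1000, 0x1000 + 4096);
        assert_eq!(space.alloc(4096), Some(0x1000));
        // The range is exhausted; further allocations fail.
        assert_eq!(space.alloc(8), None);
    }
}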
281}