// mmtk/policy/lockfreeimmortalspace.rs

use atomic::Atomic;

use std::sync::atomic::Ordering;
use std::sync::Arc;

use crate::policy::sft::GCWorkerMutRef;
use crate::policy::sft::SFT;
use crate::policy::space::{CommonSpace, Space};
use crate::util::address::Address;

use crate::util::alloc::allocator::AllocationOptions;
use crate::util::conversions;
use crate::util::heap::gc_trigger::GCTrigger;
use crate::util::heap::layout::vm_layout::vm_layout;
use crate::util::heap::PageResource;
use crate::util::heap::VMRequest;
use crate::util::metadata::side_metadata::SideMetadataContext;
use crate::util::metadata::side_metadata::SideMetadataSanity;
use crate::util::object_enum::ObjectEnumerator;
use crate::util::opaque_pointer::*;
use crate::util::os::*;
use crate::util::ObjectReference;
use crate::vm::VMBinding;
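
/// An immortal space whose allocation path is a single lock-free bump of a
/// cursor. The whole extent of the space is mapped eagerly when the space is
/// created; objects in it are never traced, moved, or collected, and pages are
/// never released back to the OS.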
pub struct LockFreeImmortalSpace<VM: VMBinding> {
    #[allow(unused)]
    name: &'static str,
    /// Allocation cursor, bumped atomically by `acquire`.
    cursor: Atomic<Address>,
    /// Exclusive end of the space; allocations past this address fail.
    limit: Address,
    /// Start address of the space.
    start: Address,
    /// Total size of the space in bytes (chunk-aligned).
    total_bytes: usize,
    /// Whether memory is zeroed in the allocation slow path.
    slow_path_zeroing: bool,
    metadata: SideMetadataContext,
    gc_trigger: Arc<GCTrigger<VM>>,
}

impl<VM: VMBinding> SFT for LockFreeImmortalSpace<VM> {
    fn name(&self) -> &'static str {
        self.get_name()
    }
    fn is_live(&self, _object: ObjectReference) -> bool {
        unimplemented!()
    }
    #[cfg(feature = "object_pinning")]
    fn pin_object(&self, _object: ObjectReference) -> bool {
        false
    }
    #[cfg(feature = "object_pinning")]
    fn unpin_object(&self, _object: ObjectReference) -> bool {
        false
    }
    #[cfg(feature = "object_pinning")]
    fn is_object_pinned(&self, _object: ObjectReference) -> bool {
        true
    }
    fn is_movable(&self) -> bool {
        unimplemented!()
    }
    #[cfg(feature = "sanity")]
    fn is_sane(&self) -> bool {
        unimplemented!()
    }
    fn initialize_object_metadata(&self, _object: ObjectReference) {
        #[cfg(feature = "vo_bit")]
        crate::util::metadata::vo_bit::set_vo_bit(_object);
    }
    #[cfg(feature = "is_mmtk_object")]
    fn is_mmtk_object(&self, addr: Address) -> Option<ObjectReference> {
        crate::util::metadata::vo_bit::is_vo_bit_set_for_addr(addr)
    }
    #[cfg(feature = "is_mmtk_object")]
    fn find_object_from_internal_pointer(
        &self,
        ptr: Address,
        max_search_bytes: usize,
    ) -> Option<ObjectReference> {
        crate::util::metadata::vo_bit::find_object_from_internal_pointer::<VM>(
            ptr,
            max_search_bytes,
        )
    }
    fn sft_trace_object(
        &self,
        _queue: &mut VectorObjectQueue,
        _object: ObjectReference,
        _worker: GCWorkerMutRef,
    ) -> ObjectReference {
        unreachable!()
    }
}

impl<VM: VMBinding> Space<VM> for LockFreeImmortalSpace<VM> {
    fn as_space(&self) -> &dyn Space<VM> {
        self
    }
    fn as_sft(&self) -> &(dyn SFT + Sync + 'static) {
        self
    }
    fn get_page_resource(&self) -> &dyn PageResource<VM> {
        unimplemented!()
    }
    fn maybe_get_page_resource_mut(&mut self) -> Option<&mut dyn PageResource<VM>> {
        None
    }
    fn common(&self) -> &CommonSpace<VM> {
        unimplemented!()
    }

    fn get_gc_trigger(&self) -> &GCTrigger<VM> {
        &self.gc_trigger
    }

    fn release_multiple_pages(&mut self, _start: Address) {
        panic!("immortalspace only releases pages en masse")
    }

    fn initialize_sft(&self, sft_map: &mut dyn crate::policy::sft_map::SFTMap) {
        unsafe { sft_map.eager_initialize(self.as_sft(), self.start, self.total_bytes) };
    }

    fn estimate_side_meta_pages(&self, data_pages: usize) -> usize {
        self.metadata.calculate_reserved_pages(data_pages)
    }

    fn reserved_pages(&self) -> usize {
        let cursor = self.cursor.load(Ordering::Relaxed);
        let data_pages = conversions::bytes_to_pages_up(self.limit - cursor);
        let meta_pages = self.estimate_side_meta_pages(data_pages);
        data_pages + meta_pages
    }
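
    // Lock-free allocation: atomically bump the cursor by the requested size
    // with `fetch_update`, then check the resulting range against `limit`.
    // On exhaustion this either panics with an out-of-memory error or returns
    // Address::ZERO, depending on the allocation options.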
    fn acquire(&self, _tls: VMThread, pages: usize, alloc_options: AllocationOptions) -> Address {
        trace!("LockFreeImmortalSpace::acquire");
        let bytes = conversions::pages_to_bytes(pages);
        let start = self
            .cursor
            .fetch_update(Ordering::Relaxed, Ordering::Relaxed, |addr| {
                Some(addr.add(bytes))
            })
            .expect("update cursor failed");
        if start + bytes > self.limit {
            if alloc_options.allow_oom_call {
                panic!("OutOfMemory");
            } else {
                return Address::ZERO;
            }
        }
        if self.slow_path_zeroing {
            crate::util::memory::zero(start, bytes);
        }
        start
    }

    fn get_name(&self) -> &'static str {
        "LockFreeImmortalSpace"
    }

    fn verify_side_metadata_sanity(&self, side_metadata_sanity_checker: &mut SideMetadataSanity) {
        side_metadata_sanity_checker
            .verify_metadata_context(std::any::type_name::<Self>(), &self.metadata)
    }

    fn enumerate_objects(&self, enumerator: &mut dyn ObjectEnumerator) {
        enumerator.visit_address_range(self.start, self.start + self.total_bytes);
    }

    fn clear_side_log_bits(&self) {
        unimplemented!()
    }

    fn set_side_log_bits(&self) {
        unimplemented!()
    }
}

use crate::plan::{ObjectQueue, VectorObjectQueue};
use crate::scheduler::GCWorker;
use crate::util::copy::CopySemantics;
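
// Objects in this space are never traced or moved, so the policy's trace hooks
// (`sft_trace_object` above and `trace_object` below) are unreachable.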
impl<VM: VMBinding> crate::policy::gc_work::PolicyTraceObject<VM> for LockFreeImmortalSpace<VM> {
    fn trace_object<Q: ObjectQueue, const KIND: crate::policy::gc_work::TraceKind>(
        &self,
        _queue: &mut Q,
        _object: ObjectReference,
        _copy: Option<CopySemantics>,
        _worker: &mut GCWorker<VM>,
    ) -> ObjectReference {
        unreachable!()
    }
    fn may_move_objects<const KIND: crate::policy::gc_work::TraceKind>() -> bool {
        unreachable!()
    }
}

impl<VM: VMBinding> LockFreeImmortalSpace<VM> {
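    // Construction: the space takes the whole fixed-size heap, reserves a
    // chunk-aligned address range for it, eagerly mmaps that range, and then
    // maps the side metadata for the range.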
    #[allow(dead_code)]
    pub fn new(args: crate::policy::space::PlanCreateSpaceArgs<VM>) -> Self {
        let slow_path_zeroing = args.zeroed;

        // The space only supports a fixed heap size, taken from the
        // fixed-size GC trigger option.
        let total_bytes = match *args.options.gc_trigger {
            crate::util::options::GCTriggerSelector::FixedHeapSize(bytes) => bytes,
            _ => unimplemented!(),
        };
        assert!(
            total_bytes <= vm_layout().available_bytes(),
            "Initial requested memory ({} bytes) overflows the heap. Max heap size is {} bytes.",
            total_bytes,
            vm_layout().available_bytes()
        );
        // Align the requested size up to the chunk granularity.
        let aligned_total_bytes = crate::util::conversions::raw_align_up(
            total_bytes,
            crate::util::heap::vm_layout::BYTES_IN_CHUNK,
        );

        // Reserve a contiguous address range for the whole space.
        let vmrequest = VMRequest::fixed_size(aligned_total_bytes);
        let VMRequest::Extent { extent, top } = vmrequest else {
            unreachable!()
        };
        let start = args.heap.reserve(extent, top);

        let space = Self {
            name: args.name,
            cursor: Atomic::new(start),
            limit: start + aligned_total_bytes,
            start,
            total_bytes: aligned_total_bytes,
            slow_path_zeroing,
            metadata: SideMetadataContext {
                global: args.global_side_metadata_specs,
                local: vec![],
            },
            gc_trigger: args.gc_trigger,
        };

        // Eagerly map the entire space, then map its side metadata.
        let strategy = MmapStrategy::default()
            .transparent_hugepages(*args.options.transparent_hugepages)
            .prot(crate::util::os::MmapProtection::ReadWrite)
            .replace(false)
            .reserve(true);
        crate::util::os::OS::dzmmap(
            start,
            aligned_total_bytes,
            strategy,
            &MmapAnnotation::Space {
                name: space.get_name(),
            },
        )
        .unwrap();
        space
            .metadata
            .try_map_metadata_space(start, aligned_total_bytes, space.get_name())
            .unwrap_or_else(|e| {
                panic!("failed to mmap meta memory: {e}")
            });

        space
    }
}