use atomic::Ordering;

use crate::plan::ObjectQueue;
use crate::plan::VectorObjectQueue;
use crate::policy::sft::GCWorkerMutRef;
use crate::policy::sft::SFT;
use crate::policy::space::{CommonSpace, Space};
use crate::util::alloc::allocator::AllocationOptions;
use crate::util::constants::BYTES_IN_PAGE;
use crate::util::heap::{FreeListPageResource, PageResource};
use crate::util::metadata;
use crate::util::object_enum::ClosureObjectEnumerator;
use crate::util::object_enum::ObjectEnumerator;
use crate::util::opaque_pointer::*;
use crate::util::treadmill::TreadMill;
use crate::util::{Address, ObjectReference};
use crate::vm::ObjectModel;
use crate::vm::VMBinding;

#[allow(unused)]
const PAGE_MASK: usize = !(BYTES_IN_PAGE - 1);
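// Two bits of per-object local metadata are used by the LOS: the low bit is the
// mark bit, and the high bit records whether the object is still in the nursery.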
const MARK_BIT: u8 = 0b01;
const NURSERY_BIT: u8 = 0b10;
const LOS_BIT_MASK: u8 = 0b11;

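/// A space for large objects. Memory is managed in whole pages by a free-list
/// page resource, liveness is tracked with a treadmill, and objects are never
/// moved.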
pub struct LargeObjectSpace<VM: VMBinding> {
    common: CommonSpace<VM>,
    pr: FreeListPageResource<VM>,
    mark_state: u8,
    in_nursery_gc: bool,
    treadmill: TreadMill,
    clear_log_bit_on_sweep: bool,
}

impl<VM: VMBinding> SFT for LargeObjectSpace<VM> {
    fn name(&self) -> &'static str {
        self.get_name()
    }
    fn is_live(&self, object: ObjectReference) -> bool {
        self.test_mark_bit(object, self.mark_state)
    }
    #[cfg(feature = "object_pinning")]
    fn pin_object(&self, _object: ObjectReference) -> bool {
        false
    }
    #[cfg(feature = "object_pinning")]
    fn unpin_object(&self, _object: ObjectReference) -> bool {
        false
    }
    #[cfg(feature = "object_pinning")]
    fn is_object_pinned(&self, _object: ObjectReference) -> bool {
        true
    }
    fn is_movable(&self) -> bool {
        false
    }
    #[cfg(feature = "sanity")]
    fn is_sane(&self) -> bool {
        true
    }

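    /// Initialize per-object metadata at allocation time: set the VO bit (if
    /// enabled), store the local mark/nursery state, maintain the global log
    /// bit if the plan needs it, and add the object to the treadmill.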
    fn initialize_object_metadata(&self, object: ObjectReference) {
        #[cfg(feature = "vo_bit")]
        crate::util::metadata::vo_bit::set_vo_bit(object);
        #[cfg(all(feature = "is_mmtk_object", debug_assertions))]
        {
            use crate::util::constants::LOG_BYTES_IN_PAGE;
            let vo_addr = object.to_raw_address();
            let offset_from_page_start = vo_addr & ((1 << LOG_BYTES_IN_PAGE) - 1) as usize;
            debug_assert!(
                offset_from_page_start < crate::util::metadata::vo_bit::VO_BIT_WORD_TO_REGION,
                "The raw address of the ObjectReference is not within the first 512 bytes of a page. Internal pointer searching in the LOS will not work."
            );
        }

        let allocate_as_live = self.should_allocate_as_live();
        let into_nursery = !allocate_as_live;

        {
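            // New objects take the current mark state; unless they are
            // allocated as live, they also get the nursery bit so that the
            // next nursery GC will visit them.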
            let mark_nursery_state = if into_nursery {
                self.mark_state | NURSERY_BIT
            } else {
                self.mark_state
            };

            VM::VMObjectModel::LOCAL_LOS_MARK_NURSERY_SPEC.store_atomic::<VM, u8>(
                object,
                mark_nursery_state,
                None,
                Ordering::SeqCst,
            );
        }

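        // If the plan wants freshly allocated objects to be unlogged, set the
        // unlog bit now; otherwise the global log bit is expected to be clear.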
        if self.common.unlog_allocated_object {
            debug_assert!(self.common.needs_log_bit);
            debug_assert!(
                !allocate_as_live,
                "Currently only ConcurrentImmix can allocate as live, and it doesn't unlog allocated objects in LOS."
            );

            VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.mark_as_unlogged::<VM>(object, Ordering::SeqCst);
        } else {
            #[cfg(debug_assertions)]
            if self.common.needs_log_bit {
                debug_assert_eq!(
                    VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.load_atomic::<VM, u8>(
                        object,
                        None,
                        Ordering::Acquire
                    ),
                    0
                );
            }
        }

        self.treadmill.add_to_treadmill(object, into_nursery);
    }

    #[cfg(feature = "is_mmtk_object")]
    fn is_mmtk_object(&self, addr: Address) -> Option<ObjectReference> {
        crate::util::metadata::vo_bit::is_vo_bit_set_for_addr(addr)
    }
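    /// Search backwards, page by page, for the VO bit of the object that
    /// contains `ptr`, giving up after `max_search_bytes` or at the first
    /// unmapped page.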
    #[cfg(feature = "is_mmtk_object")]
    fn find_object_from_internal_pointer(
        &self,
        ptr: Address,
        max_search_bytes: usize,
    ) -> Option<ObjectReference> {
        use crate::{util::metadata::vo_bit, MMAPPER};

        let mmap_granularity = MMAPPER.granularity();

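        // Remember the start of the most recently verified mapped mmap granule
        // so the mmapper is queried at most once per granule while walking
        // downwards.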
        let mut mapped_grain = Address::MAX;

        let mut cur_page = ptr.align_down(BYTES_IN_PAGE);
        let low_page = ptr
            .saturating_sub(max_search_bytes)
            .align_down(BYTES_IN_PAGE);
        while cur_page >= low_page {
            if cur_page < mapped_grain {
                if !cur_page.is_mapped() {
                    return None;
                }
                mapped_grain = cur_page.align_down(mmap_granularity);
            }
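            // Only the VO bit word covering the start of the page needs to be
            // checked: LOS object references always fall within the first VO
            // bit word of a page (see `initialize_object_metadata`).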
            if vo_bit::get_raw_vo_bit_word(cur_page) != 0 {
                for offset in 0..vo_bit::VO_BIT_WORD_TO_REGION {
                    let addr = cur_page + offset;
                    if unsafe { vo_bit::is_vo_addr(addr) } {
                        return vo_bit::is_internal_ptr_from_vo_bit::<VM>(addr, ptr);
                    }
                }
                unreachable!(
                    "We found a VO bit in the raw word, but we cannot find the exact address"
                );
            }

            cur_page -= BYTES_IN_PAGE;
        }
        None
    }
    fn sft_trace_object(
        &self,
        queue: &mut VectorObjectQueue,
        object: ObjectReference,
        _worker: GCWorkerMutRef,
    ) -> ObjectReference {
        self.trace_object(queue, object)
    }

    fn debug_print_object_info(&self, object: ObjectReference) {
        println!("marked = {}", self.test_mark_bit(object, self.mark_state));
        println!("nursery = {}", self.is_in_nursery(object));
        self.common.debug_print_object_global_info(object);
    }
}

impl<VM: VMBinding> Space<VM> for LargeObjectSpace<VM> {
    fn as_space(&self) -> &dyn Space<VM> {
        self
    }
    fn as_sft(&self) -> &(dyn SFT + Sync + 'static) {
        self
    }
    fn get_page_resource(&self) -> &dyn PageResource<VM> {
        &self.pr
    }
    fn maybe_get_page_resource_mut(&mut self) -> Option<&mut dyn PageResource<VM>> {
        Some(&mut self.pr)
    }

    fn initialize_sft(&self, sft_map: &mut dyn crate::policy::sft_map::SFTMap) {
        self.common().initialize_sft(self.as_sft(), sft_map)
    }

    fn common(&self) -> &CommonSpace<VM> {
        &self.common
    }

    fn release_multiple_pages(&mut self, start: Address) {
        self.pr.release_pages(start);
    }

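    /// Enumerate objects in this space. The collection nursery and the
    /// from-space must be empty, i.e. this is not expected to run in the
    /// middle of a collection, so only the to-space needs to be walked.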
    fn enumerate_objects(&self, enumerator: &mut dyn ObjectEnumerator) {
        assert!(
            self.treadmill.is_collect_nursery_empty(),
            "Collection nursery is not empty"
        );
        assert!(
            self.treadmill.is_from_space_empty(),
            "From-space is not empty"
        );

        self.treadmill.enumerate_objects(enumerator, false);
    }

    fn clear_side_log_bits(&self) {
        let mut enumerator = ClosureObjectEnumerator::<_, VM>::new(|object| {
            VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.clear::<VM>(object, Ordering::SeqCst);
        });
        self.treadmill.enumerate_objects(&mut enumerator, true);
    }

    fn set_side_log_bits(&self) {
        let mut enumerator = ClosureObjectEnumerator::<_, VM>::new(|object| {
            VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.mark_as_unlogged::<VM>(object, Ordering::SeqCst);
        });
        self.treadmill.enumerate_objects(&mut enumerator, true);
    }
}

use crate::scheduler::GCWorker;
use crate::util::copy::CopySemantics;

impl<VM: VMBinding> crate::policy::gc_work::PolicyTraceObject<VM> for LargeObjectSpace<VM> {
    fn trace_object<Q: ObjectQueue, const KIND: crate::policy::gc_work::TraceKind>(
        &self,
        queue: &mut Q,
        object: ObjectReference,
        _copy: Option<CopySemantics>,
        _worker: &mut GCWorker<VM>,
    ) -> ObjectReference {
        self.trace_object(queue, object)
    }
    fn may_move_objects<const KIND: crate::policy::gc_work::TraceKind>() -> bool {
        false
    }
}

impl<VM: VMBinding> LargeObjectSpace<VM> {
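    /// Create a new LargeObjectSpace. `protect_memory_on_release` makes the
    /// page resource protect pages when they are released, so stale accesses
    /// to a freed large object fault instead of silently reading garbage.
    /// `clear_log_bit_on_sweep` makes sweeping clear the global log bit of
    /// each swept object.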
    pub fn new(
        args: crate::policy::space::PlanCreateSpaceArgs<VM>,
        protect_memory_on_release: bool,
        clear_log_bit_on_sweep: bool,
    ) -> Self {
        let is_discontiguous = args.vmrequest.is_discontiguous();
        let vm_map = args.vm_map;
        let common = CommonSpace::new(args.into_policy_args(
            false,
            false,
            metadata::extract_side_metadata(&[*VM::VMObjectModel::LOCAL_LOS_MARK_NURSERY_SPEC]),
        ));
        let mut pr = if is_discontiguous {
            FreeListPageResource::new_discontiguous(vm_map)
        } else {
            FreeListPageResource::new_contiguous(common.start, common.extent, vm_map)
        };
        pr.protect_memory_on_release = if protect_memory_on_release {
            Some(common.mmap_strategy().prot)
        } else {
            None
        };
        LargeObjectSpace {
            pr,
            common,
            mark_state: 0,
            in_nursery_gc: false,
            treadmill: TreadMill::new(),
            clear_log_bit_on_sweep,
        }
    }

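    /// Prepare for a GC: flip the mark state on a full-heap GC, flip the
    /// treadmill, and record whether this is a nursery GC.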
    pub fn prepare(&mut self, full_heap: bool) {
        if full_heap {
            self.mark_state = MARK_BIT - self.mark_state;
        }
        self.treadmill.flip(full_heap);
        self.in_nursery_gc = !full_heap;
    }

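    /// Release memory after tracing: sweep whatever is left in the collection
    /// nursery, and on a full-heap GC also sweep the from-space.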
    pub fn release(&mut self, full_heap: bool) {
        debug_assert!(self.treadmill.is_alloc_nursery_empty());

        self.sweep_large_pages(true);
        debug_assert!(self.treadmill.is_collect_nursery_empty());
        if full_heap {
            self.sweep_large_pages(false);
            debug_assert!(self.treadmill.is_from_space_empty());
        }
    }

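    /// Trace an object. During a nursery GC, mature objects are ignored (they
    /// are not candidates for collection). The first tracer to mark an object
    /// moves it on the treadmill and enqueues it for scanning; later tracers
    /// see it already marked and do nothing. Objects in this space never move,
    /// so the original reference is always returned.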
    #[allow(clippy::collapsible_if)]
    pub fn trace_object<Q: ObjectQueue>(
        &self,
        queue: &mut Q,
        object: ObjectReference,
    ) -> ObjectReference {
        #[cfg(feature = "vo_bit")]
        debug_assert!(
            crate::util::metadata::vo_bit::is_vo_bit_set(object),
            "{:x}: VO bit not set",
            object
        );
        let nursery_object = self.is_in_nursery(object);
        trace!(
            "LOS object {} {} a nursery object",
            object,
            if nursery_object { "is" } else { "is not" }
        );
        if !self.in_nursery_gc || nursery_object {
            if self.test_and_mark(object, self.mark_state) {
                trace!("LOS object {} is being marked now", object);
                self.treadmill.copy(object, nursery_object);
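                // If the plan requires traced objects to be unlogged, set the
                // unlog bit as part of marking.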
                if self.common.unlog_traced_object {
                    VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC
                        .mark_as_unlogged::<VM>(object, Ordering::SeqCst);
                }
                queue.enqueue(object);
            } else {
                trace!(
                    "LOS object {} is not being marked now, it was marked before",
                    object
                );
            }
        }
        object
    }

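    /// Sweep one treadmill list (the collection nursery if `sweep_nursery` is
    /// true, otherwise the mature objects left in the from-space): clear
    /// per-object metadata and return each object's pages to the page resource.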
    fn sweep_large_pages(&mut self, sweep_nursery: bool) {
        let sweep = |object: ObjectReference| {
            #[cfg(feature = "vo_bit")]
            crate::util::metadata::vo_bit::unset_vo_bit(object);
            if self.clear_log_bit_on_sweep {
                VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.clear::<VM>(object, Ordering::SeqCst);
            }
            self.pr
                .release_pages(get_super_page(object.to_object_start::<VM>()));
        };
        if sweep_nursery {
            for object in self.treadmill.collect_nursery() {
                sweep(object);
            }
        } else {
            for object in self.treadmill.collect_mature() {
                sweep(object);
            }
        }
    }

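    /// Enumerate the objects currently in the to-space of the treadmill. The
    /// allocation nursery is expected to be empty when this is called.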
    pub(crate) fn enumerate_to_space_objects(&self, enumerator: &mut dyn ObjectEnumerator) {
        debug_assert!(self.treadmill.is_alloc_nursery_empty());
        self.treadmill.enumerate_objects(enumerator, false);
    }

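    /// Allocate `pages` pages for a large object, acquiring them from the page
    /// resource on behalf of the thread `tls`.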
    pub fn allocate_pages(
        &self,
        tls: VMThread,
        pages: usize,
        alloc_options: AllocationOptions,
    ) -> Address {
        self.acquire(tls, pages, alloc_options)
    }

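    /// Atomically try to mark `object` with `value`. Returns `true` if this
    /// call changed the mark state, `false` if the object was already marked.
    /// During a nursery GC the nursery bit takes part in the comparison, so a
    /// nursery object is always re-marked; the compare-exchange always clears
    /// the nursery bit while installing the new mark state.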
    fn test_and_mark(&self, object: ObjectReference, value: u8) -> bool {
        loop {
            let mask = if self.in_nursery_gc {
                LOS_BIT_MASK
            } else {
                MARK_BIT
            };
            let old_value = VM::VMObjectModel::LOCAL_LOS_MARK_NURSERY_SPEC.load_atomic::<VM, u8>(
                object,
                None,
                Ordering::SeqCst,
            );
            let mark_bit = old_value & mask;
            if mark_bit == value {
                return false;
            }
            if VM::VMObjectModel::LOCAL_LOS_MARK_NURSERY_SPEC
                .compare_exchange_metadata::<VM, u8>(
                    object,
                    old_value,
                    old_value & !LOS_BIT_MASK | value,
                    None,
                    Ordering::SeqCst,
                    Ordering::SeqCst,
                )
                .is_ok()
            {
                break;
            }
        }
        true
    }

    fn test_mark_bit(&self, object: ObjectReference, value: u8) -> bool {
        VM::VMObjectModel::LOCAL_LOS_MARK_NURSERY_SPEC.load_atomic::<VM, u8>(
            object,
            None,
            Ordering::SeqCst,
        ) & MARK_BIT
            == value
    }

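    /// Check whether the object still has its nursery bit set.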
    fn is_in_nursery(&self, object: ObjectReference) -> bool {
        VM::VMObjectModel::LOCAL_LOS_MARK_NURSERY_SPEC.load_atomic::<VM, u8>(
            object,
            None,
            Ordering::Relaxed,
        ) & NURSERY_BIT
            == NURSERY_BIT
    }

    pub fn is_marked(&self, object: ObjectReference) -> bool {
        self.test_mark_bit(object, self.mark_state)
    }
}

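/// Get the start of the page containing `cell`; sweeping passes this address
/// to `release_pages`.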
fn get_super_page(cell: Address) -> Address {
    cell.align_down(BYTES_IN_PAGE)
}