use std::sync::{
    atomic::{AtomicUsize, Ordering},
    Arc,
};

use crate::{
    policy::{marksweepspace::native_ms::*, sft::GCWorkerMutRef},
    scheduler::{GCWorkScheduler, GCWorker, WorkBucketStage},
    util::{
        copy::CopySemantics,
        epilogue,
        heap::{BlockPageResource, PageResource},
        metadata::{self, side_metadata::SideMetadataSpec, MetadataSpec},
        object_enum::{self, ObjectEnumerator},
        ObjectReference,
    },
    vm::{ActivePlan, VMBinding},
};

#[cfg(feature = "is_mmtk_object")]
use crate::util::Address;

use crate::plan::ObjectQueue;
use crate::plan::VectorObjectQueue;
use crate::policy::sft::SFT;
use crate::policy::space::{CommonSpace, Space};
use crate::util::alloc::allocator::AllocationOptions;
use crate::util::constants::LOG_BYTES_IN_PAGE;
use crate::util::heap::chunk_map::*;
use crate::util::linear_scan::Region;
use crate::util::VMThread;
use crate::vm::ObjectModel;
use crate::vm::Scanning;
use std::sync::Mutex;

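/// The result of trying to acquire a block for allocation.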
pub enum BlockAcquireResult {
    /// The space failed to acquire a block.
    Exhausted,
    /// A newly acquired block from the page resource.
    Fresh(Block),
    /// A block reused from the abandoned available lists; it is ready for allocation.
    AbandonedAvailable(Block),
    /// A block reused from the abandoned unswept lists; it must be swept before use.
    AbandonedUnswept(Block),
}

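/// A native mark-sweep space.  Memory is managed in [`Block`]s, and blocks are
/// organised into size-class bins in the style of mimalloc.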
pub struct MarkSweepSpace<VM: VMBinding> {
    pub common: CommonSpace<VM>,
    /// The page resource that blocks are acquired from and released to.
    pr: BlockPageResource<VM, Block>,
    /// Records which chunks of this space contain allocated blocks.
    chunk_map: ChunkMap,
    /// Work packets are scheduled onto this scheduler during prepare and release.
    scheduler: Arc<GCWorkScheduler<VM>>,
    /// Block lists that are not held by any allocator.  Blocks are reused from
    /// here in `acquire_block` before fresh blocks are requested from the page
    /// resource.
    abandoned: Mutex<AbandonedBlockLists>,
    /// Block lists abandoned during the current GC.  They are merged back into
    /// `abandoned` at the end of the release phase.
    abandoned_in_gc: Mutex<AbandonedBlockLists>,
    /// The number of release work packets that have not yet finished.  When it
    /// reaches zero, sweeping (or block recycling) is triggered.
    pending_release_packets: AtomicUsize,
}

unsafe impl<VM: VMBinding> Sync for MarkSweepSpace<VM> {}

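/// Block lists, one per size-class bin, for blocks that are not held by any
/// allocator.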
pub struct AbandonedBlockLists {
    pub available: BlockLists,
    pub unswept: BlockLists,
    pub consumed: BlockLists,
}

impl AbandonedBlockLists {
    fn new() -> Self {
        Self {
            available: new_empty_block_lists(),
            unswept: new_empty_block_lists(),
            consumed: new_empty_block_lists(),
        }
    }

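    /// Release blocks with no live objects.  Under lazy sweeping, the
    /// surviving blocks are moved to the unswept lists so they are swept the
    /// next time they are acquired for allocation.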
    fn sweep_later<VM: VMBinding>(&mut self, space: &MarkSweepSpace<VM>) {
        for i in 0..MI_BIN_FULL {
            self.available[i].release_blocks(space);
            self.consumed[i].release_blocks(space);
            if cfg!(not(feature = "eager_sweeping")) {
                self.unswept[i].release_blocks(space);
            } else {
                // With eager sweeping, blocks are swept during the release phase,
                // so the unswept lists should always be empty here.
                debug_assert!(self.unswept[i].is_empty());
            }

            if cfg!(not(feature = "eager_sweeping")) {
                // Lazy sweeping: the surviving blocks are swept the next time they are used.
                self.unswept[i].append(&mut self.available[i]);
                self.unswept[i].append(&mut self.consumed[i]);
            }
        }
    }

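    /// Move consumed blocks that have free cells again back to the available
    /// lists so they can be reused for allocation.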
    fn recycle_blocks(&mut self) {
        for i in 0..MI_BIN_FULL {
            for block in self.consumed[i].iter() {
                if block.has_free_cells() {
                    self.consumed[i].remove(block);
                    self.available[i].push(block);
                }
            }
        }
    }

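    /// Append all blocks from `other` into `self`, leaving `other` empty.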
    fn merge(&mut self, other: &mut Self) {
        for i in 0..MI_BIN_FULL {
            self.available[i].append(&mut other.available[i]);
            self.unswept[i].append(&mut other.unswept[i]);
            self.consumed[i].append(&mut other.consumed[i]);
        }
    }

    #[cfg(debug_assertions)]
    fn assert_empty(&self) {
        for i in 0..MI_BIN_FULL {
            assert!(self.available[i].is_empty());
            assert!(self.unswept[i].is_empty());
            assert!(self.consumed[i].is_empty());
        }
    }
}

impl<VM: VMBinding> SFT for MarkSweepSpace<VM> {
    fn name(&self) -> &'static str {
        self.common.name
    }

    fn is_live(&self, object: crate::util::ObjectReference) -> bool {
        VM::VMObjectModel::LOCAL_MARK_BIT_SPEC.is_marked::<VM>(object, Ordering::SeqCst)
    }

    #[cfg(feature = "object_pinning")]
    fn pin_object(&self, _object: ObjectReference) -> bool {
        false
    }

    #[cfg(feature = "object_pinning")]
    fn unpin_object(&self, _object: ObjectReference) -> bool {
        false
    }

    #[cfg(feature = "object_pinning")]
    fn is_object_pinned(&self, _object: ObjectReference) -> bool {
        false
    }

    fn is_movable(&self) -> bool {
        false
    }

    #[cfg(feature = "sanity")]
    fn is_sane(&self) -> bool {
        true
    }

    fn initialize_object_metadata(&self, _object: crate::util::ObjectReference) {
        #[cfg(feature = "vo_bit")]
        crate::util::metadata::vo_bit::set_vo_bit(_object);
    }

    #[cfg(feature = "is_mmtk_object")]
    fn is_mmtk_object(&self, addr: Address) -> Option<ObjectReference> {
        crate::util::metadata::vo_bit::is_vo_bit_set_for_addr(addr)
    }

    #[cfg(feature = "is_mmtk_object")]
    fn find_object_from_internal_pointer(
        &self,
        ptr: Address,
        max_search_bytes: usize,
    ) -> Option<ObjectReference> {
        // Objects in this space are no larger than MAX_OBJECT_SIZE, so bound the search.
        let search_bytes = usize::min(MAX_OBJECT_SIZE, max_search_bytes);
        crate::util::metadata::vo_bit::find_object_from_internal_pointer::<VM>(ptr, search_bytes)
    }

    fn sft_trace_object(
        &self,
        queue: &mut VectorObjectQueue,
        object: ObjectReference,
        _worker: GCWorkerMutRef,
    ) -> ObjectReference {
        self.trace_object(queue, object)
    }
}

impl<VM: VMBinding> Space<VM> for MarkSweepSpace<VM> {
    fn as_space(&self) -> &dyn Space<VM> {
        self
    }

    fn as_sft(&self) -> &(dyn SFT + Sync + 'static) {
        self
    }

    fn get_page_resource(&self) -> &dyn crate::util::heap::PageResource<VM> {
        &self.pr
    }

    fn maybe_get_page_resource_mut(&mut self) -> Option<&mut dyn PageResource<VM>> {
        Some(&mut self.pr)
    }

    fn initialize_sft(&self, sft_map: &mut dyn crate::policy::sft_map::SFTMap) {
        self.common().initialize_sft(self.as_sft(), sft_map)
    }

    fn common(&self) -> &CommonSpace<VM> {
        &self.common
    }

    fn release_multiple_pages(&mut self, _start: crate::util::Address) {
        todo!()
    }

    fn enumerate_objects(&self, enumerator: &mut dyn ObjectEnumerator) {
        object_enum::enumerate_blocks_from_chunk_map::<Block>(enumerator, &self.chunk_map);
    }

    fn clear_side_log_bits(&self) {
        let log_bit = VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.extract_side_spec();
        for chunk in self.chunk_map.all_chunks() {
            log_bit.bzero_metadata(chunk.start(), Chunk::BYTES);
        }
    }

    fn set_side_log_bits(&self) {
        let log_bit = VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.extract_side_spec();
        for chunk in self.chunk_map.all_chunks() {
            log_bit.bset_metadata(chunk.start(), Chunk::BYTES);
        }
    }
}

impl<VM: VMBinding> crate::policy::gc_work::PolicyTraceObject<VM> for MarkSweepSpace<VM> {
    fn trace_object<Q: ObjectQueue, const KIND: crate::policy::gc_work::TraceKind>(
        &self,
        queue: &mut Q,
        object: ObjectReference,
        _copy: Option<CopySemantics>,
        _worker: &mut GCWorker<VM>,
    ) -> ObjectReference {
        self.trace_object(queue, object)
    }

    fn may_move_objects<const KIND: crate::policy::gc_work::TraceKind>() -> bool {
        false
    }
}

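/// The largest object size this space can hold, i.e. the largest mimalloc size
/// class.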
#[allow(dead_code)]
pub const MAX_OBJECT_SIZE: usize = crate::policy::marksweepspace::native_ms::MI_LARGE_OBJ_SIZE_MAX;

impl<VM: VMBinding> MarkSweepSpace<VM> {
    #[allow(clippy::ptr_arg)]
    pub fn extend_global_side_metadata_specs(_specs: &mut Vec<SideMetadataSpec>) {
        // This space currently does not add any global side metadata specs.
    }

    pub fn new(args: crate::policy::space::PlanCreateSpaceArgs<VM>) -> MarkSweepSpace<VM> {
        let scheduler = args.scheduler.clone();
        let vm_map = args.vm_map;
        let is_discontiguous = args.vmrequest.is_discontiguous();
        let local_specs = {
            metadata::extract_side_metadata(&[
                MetadataSpec::OnSide(Block::NEXT_BLOCK_TABLE),
                MetadataSpec::OnSide(Block::PREV_BLOCK_TABLE),
                MetadataSpec::OnSide(Block::FREE_LIST_TABLE),
                MetadataSpec::OnSide(Block::SIZE_TABLE),
                #[cfg(feature = "malloc_native_mimalloc")]
                MetadataSpec::OnSide(Block::LOCAL_FREE_LIST_TABLE),
                #[cfg(feature = "malloc_native_mimalloc")]
                MetadataSpec::OnSide(Block::THREAD_FREE_LIST_TABLE),
                MetadataSpec::OnSide(Block::BLOCK_LIST_TABLE),
                MetadataSpec::OnSide(Block::TLS_TABLE),
                MetadataSpec::OnSide(Block::MARK_TABLE),
                *VM::VMObjectModel::LOCAL_MARK_BIT_SPEC,
            ])
        };
        let common = CommonSpace::new(args.into_policy_args(false, false, local_specs));
        let space_index = common.descriptor.get_index();
        MarkSweepSpace {
            pr: if is_discontiguous {
                BlockPageResource::new_discontiguous(
                    Block::LOG_PAGES,
                    vm_map,
                    scheduler.num_workers(),
                )
            } else {
                BlockPageResource::new_contiguous(
                    Block::LOG_PAGES,
                    common.start,
                    common.extent,
                    vm_map,
                    scheduler.num_workers(),
                )
            },
            common,
            chunk_map: ChunkMap::new(space_index),
            scheduler,
            abandoned: Mutex::new(AbandonedBlockLists::new()),
            abandoned_in_gc: Mutex::new(AbandonedBlockLists::new()),
            pending_release_packets: AtomicUsize::new(0),
        }
    }

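    /// Mark the object without atomic operations.  Returns `true` if the
    /// object was newly marked by this call.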
    fn attempt_mark_non_atomic(&self, object: ObjectReference) -> bool {
        if !VM::VMObjectModel::LOCAL_MARK_BIT_SPEC.is_marked::<VM>(object, Ordering::SeqCst) {
            VM::VMObjectModel::LOCAL_MARK_BIT_SPEC.mark::<VM>(object, Ordering::SeqCst);
            true
        } else {
            false
        }
    }

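    /// Mark the object with a compare-and-exchange loop.  Returns `true` if
    /// this call won the race and marked the object.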
    fn attempt_mark_atomic(&self, object: ObjectReference) -> bool {
        let mark_state = 1u8;

        loop {
            let old_value = VM::VMObjectModel::LOCAL_MARK_BIT_SPEC.load_atomic::<VM, u8>(
                object,
                None,
                Ordering::SeqCst,
            );
            if old_value == mark_state {
                return false;
            }

            if VM::VMObjectModel::LOCAL_MARK_BIT_SPEC
                .compare_exchange_metadata::<VM, u8>(
                    object,
                    old_value,
                    mark_state,
                    None,
                    Ordering::SeqCst,
                    Ordering::SeqCst,
                )
                .is_ok()
            {
                break;
            }
        }
        true
    }

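    /// Mark the object.  If the VM requires each object to be enqueued only
    /// once, marking must be atomic so that exactly one tracing worker wins.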
    fn attempt_mark(&self, object: ObjectReference) -> bool {
        if VM::VMScanning::UNIQUE_OBJECT_ENQUEUING {
            self.attempt_mark_atomic(object)
        } else {
            self.attempt_mark_non_atomic(object)
        }
    }

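    /// Trace an object: mark it if it is not already marked, mark its block,
    /// and enqueue it for scanning.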
    fn trace_object<Q: ObjectQueue>(
        &self,
        queue: &mut Q,
        object: ObjectReference,
    ) -> ObjectReference {
        debug_assert!(
            self.in_space(object),
            "Cannot mark an object {} that was not allocated by the free list allocator.",
            object,
        );
        if self.attempt_mark(object) {
            let block = Block::containing(object);
            block.set_state(BlockState::Marked);
            queue.enqueue(object);
        }
        object
    }

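    /// Initialise the metadata of a newly acquired block and record its chunk
    /// in the chunk map.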
    pub fn record_new_block(&self, block: Block) {
        block.init();
        self.chunk_map.set_allocated(block.chunk(), true);
    }

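    /// Prepare for a GC: schedule one work packet per allocated chunk to reset
    /// block and mark states.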
    pub fn prepare(&mut self, _full_heap: bool) {
        #[cfg(debug_assertions)]
        self.abandoned_in_gc.lock().unwrap().assert_empty();

        // SAFETY: The work packets only run during this GC, while the space is still alive.
        let space = unsafe { &*(self as *const Self) };
        let work_packets = self
            .chunk_map
            .generate_tasks(|chunk| Box::new(PrepareChunkMap { space, chunk }));
        self.scheduler.work_buckets[crate::scheduler::WorkBucketStage::Prepare]
            .bulk_add(work_packets);
    }

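    /// Release after a GC: arm the pending-packet counter with one slot per
    /// mutator plus one for the space, and schedule the work packet that
    /// processes the abandoned block lists.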
    pub fn release(&mut self) {
        // One pending packet per mutator, plus the ReleaseMarkSweepSpace packet below.
        let num_mutators = VM::VMActivePlan::number_of_mutators();
        self.pending_release_packets
            .store(num_mutators + 1, Ordering::SeqCst);

        // SAFETY: The work packet only runs during this GC, while the space is still alive.
        let space = unsafe { &*(self as *const Self) };
        let work_packet = ReleaseMarkSweepSpace { space };
        self.scheduler.work_buckets[crate::scheduler::WorkBucketStage::Release].add(work_packet);
    }

    pub fn end_of_gc(&mut self) {
        epilogue::debug_assert_counter_zero(
            &self.pending_release_packets,
            "pending_release_packets",
        );
    }

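    /// Clear a block's metadata and return it to the page resource.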
    pub fn release_block(&self, block: Block) {
        self.block_clear_metadata(block);

        block.deinit();
        self.pr.release_block(block);
    }

    pub fn block_clear_metadata(&self, block: Block) {
        for metadata_spec in Block::METADATA_SPECS {
            metadata_spec.set_zero_atomic(block.start(), Ordering::SeqCst);
        }
        #[cfg(feature = "vo_bit")]
        crate::util::metadata::vo_bit::bzero_vo_bit(block.start(), Block::BYTES);
    }

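    /// Acquire a block for the given size class.  Abandoned available blocks
    /// are reused first, then abandoned unswept blocks; only then is a fresh
    /// block requested from the page resource.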
    pub fn acquire_block(
        &self,
        tls: VMThread,
        size: usize,
        align: usize,
        alloc_options: AllocationOptions,
    ) -> BlockAcquireResult {
        {
            let mut abandoned = self.abandoned.lock().unwrap();
            let bin = mi_bin::<VM>(size, align);

            {
                let abandoned_available = &mut abandoned.available;
                if !abandoned_available[bin].is_empty() {
                    let block = abandoned_available[bin].pop().unwrap();
                    return BlockAcquireResult::AbandonedAvailable(block);
                }
            }

            {
                let abandoned_unswept = &mut abandoned.unswept;
                if !abandoned_unswept[bin].is_empty() {
                    let block = abandoned_unswept[bin].pop().unwrap();
                    return BlockAcquireResult::AbandonedUnswept(block);
                }
            }
        }

        let acquired = self.acquire(tls, Block::BYTES >> LOG_BYTES_IN_PAGE, alloc_options);
        if acquired.is_zero() {
            BlockAcquireResult::Exhausted
        } else {
            BlockAcquireResult::Fresh(Block::from_unaligned_address(acquired))
        }
    }

    pub fn get_abandoned_block_lists(&self) -> &Mutex<AbandonedBlockLists> {
        &self.abandoned
    }

    pub fn get_abandoned_block_lists_in_gc(&self) -> &Mutex<AbandonedBlockLists> {
        &self.abandoned_in_gc
    }

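    /// Called when a release work packet finishes.  The last packet triggers
    /// either the sweep tasks (eager sweeping) or block recycling (lazy
    /// sweeping).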
    pub fn release_packet_done(&self) {
        let old = self.pending_release_packets.fetch_sub(1, Ordering::SeqCst);
        if old == 1 {
            // This was the last pending release packet.
            if cfg!(feature = "eager_sweeping") {
                let work_packets = self.generate_sweep_tasks();
                self.scheduler.work_buckets[WorkBucketStage::Release].bulk_add(work_packets);
            } else {
                self.recycle_blocks();
            }
        }
    }

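    /// Generate a `SweepChunk` work packet for each allocated chunk, with a
    /// shared `RecycleBlocks` epilogue that runs after the last packet.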
    fn generate_sweep_tasks(&self) -> Vec<Box<dyn GCWork<VM>>> {
        let space = unsafe { &*(self as *const Self) };
        let epilogue = Arc::new(RecycleBlocks {
            space,
            counter: AtomicUsize::new(0),
        });
        let tasks = self.chunk_map.generate_tasks(|chunk| {
            Box::new(SweepChunk {
                space,
                chunk,
                epilogue: epilogue.clone(),
            })
        });
        epilogue.counter.store(tasks.len(), Ordering::SeqCst);
        tasks
    }

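    /// Recycle consumed blocks, merge the lists abandoned during this GC back
    /// into the main abandoned lists, and flush the block page resource.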
    fn recycle_blocks(&self) {
        {
            // These locks are not expected to be contended at this point.
            let mut abandoned = self.abandoned.try_lock().unwrap();
            let mut abandoned_in_gc = self.abandoned_in_gc.try_lock().unwrap();

            if cfg!(feature = "eager_sweeping") {
                // Swept blocks that have free cells can be made available again.
                abandoned.recycle_blocks();
                abandoned_in_gc.recycle_blocks();
            }

            abandoned.merge(&mut abandoned_in_gc);

            #[cfg(debug_assertions)]
            abandoned_in_gc.assert_empty();
        }

        self.pr.flush_all();
    }
}

use crate::scheduler::GCWork;
use crate::MMTK;

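/// A work packet that prepares one chunk for GC by resetting the states of its
/// allocated blocks.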
struct PrepareChunkMap<VM: VMBinding> {
    space: &'static MarkSweepSpace<VM>,
    chunk: Chunk,
}

impl<VM: VMBinding> GCWork<VM> for PrepareChunkMap<VM> {
    fn do_work(&mut self, _worker: &mut GCWorker<VM>, _mmtk: &'static MMTK<VM>) {
        debug_assert!(self.space.chunk_map.get(self.chunk).unwrap().is_allocated());
        let mut n_occupied_blocks = 0;
        self.chunk
            .iter_region::<Block>()
            .filter(|block| block.get_state() != BlockState::Unallocated)
            .for_each(|block| {
                // Clear the mark state of every occupied block for the coming trace.
                block.set_state(BlockState::Unmarked);
                n_occupied_blocks += 1
            });
        if n_occupied_blocks == 0 {
            // The chunk has no occupied blocks, so give it up in the chunk map.
            self.space.chunk_map.set_allocated(self.chunk, false)
        } else if let MetadataSpec::OnSide(side) = *VM::VMObjectModel::LOCAL_MARK_BIT_SPEC {
            // Bulk-clear the side mark bits for the whole chunk.
            side.bzero_metadata(self.chunk.start(), Chunk::BYTES);
        }
    }
}

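/// A work packet that processes the space's abandoned block lists during the
/// release phase.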
struct ReleaseMarkSweepSpace<VM: VMBinding> {
    space: &'static MarkSweepSpace<VM>,
}

impl<VM: VMBinding> GCWork<VM> for ReleaseMarkSweepSpace<VM> {
    fn do_work(&mut self, _worker: &mut GCWorker<VM>, _mmtk: &'static MMTK<VM>) {
        {
            let mut abandoned = self.space.abandoned.lock().unwrap();
            abandoned.sweep_later(self.space);
        }

        self.space.release_packet_done();
    }
}

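/// A work packet that sweeps all allocated blocks in one chunk.  Generated only
/// when eager sweeping is enabled.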
struct SweepChunk<VM: VMBinding> {
    space: &'static MarkSweepSpace<VM>,
    chunk: Chunk,
    /// An epilogue shared by all `SweepChunk` packets of this GC.
    epilogue: Arc<RecycleBlocks<VM>>,
}

impl<VM: VMBinding> GCWork<VM> for SweepChunk<VM> {
    fn do_work(&mut self, _worker: &mut GCWorker<VM>, _mmtk: &'static MMTK<VM>) {
        assert!(self.space.chunk_map.get(self.chunk).unwrap().is_allocated());

        let mut allocated_blocks = 0;
        for block in self
            .chunk
            .iter_region::<Block>()
            .filter(|block| block.get_state() != BlockState::Unallocated)
        {
            // Unmarked blocks were released earlier, so every remaining block is marked.
            debug_assert_eq!(block.get_state(), BlockState::Marked);
            block.sweep::<VM>();
            allocated_blocks += 1;
        }
        probe!(mmtk, sweep_chunk, allocated_blocks);
        // If the chunk ended up with no allocated blocks, give it up in the chunk map.
        if allocated_blocks == 0 {
            self.space.chunk_map.set_allocated(self.chunk, false);
        }
        self.epilogue.finish_one_work_packet();
    }
}

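/// An epilogue shared by the `SweepChunk` packets.  The last packet to finish
/// runs `MarkSweepSpace::recycle_blocks`.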
struct RecycleBlocks<VM: VMBinding> {
    space: &'static MarkSweepSpace<VM>,
    counter: AtomicUsize,
}

impl<VM: VMBinding> RecycleBlocks<VM> {
    fn finish_one_work_packet(&self) {
        if 1 == self.counter.fetch_sub(1, Ordering::SeqCst) {
            self.space.recycle_blocks()
        }
    }
}

impl<VM: VMBinding> Drop for RecycleBlocks<VM> {
    fn drop(&mut self) {
        epilogue::debug_assert_counter_zero(&self.counter, "RecycleBlocks::counter");
    }
}