1use super::work_bucket::WorkBucketStage;
2use super::*;
3use crate::global_state::GcStatus;
4use crate::plan::ObjectsClosure;
5use crate::plan::VectorObjectQueue;
6use crate::util::*;
7use crate::vm::slot::Slot;
8use crate::vm::*;
9use crate::*;
10use std::marker::PhantomData;
11use std::ops::{Deref, DerefMut};
12
/// The very first work packet of a GC. It decides the collection kind and
/// asks the plan to schedule the rest of the collection work.
pub struct ScheduleCollection;

impl<VM: VMBinding> GCWork<VM> for ScheduleCollection {
    fn do_work(&mut self, worker: &mut GCWorker<VM>, mmtk: &'static MMTK<VM>) {
        // Let the GC-trigger policy observe that a GC has started.
        mmtk.gc_trigger.policy.on_gc_start(mmtk);

        // Decide whether this GC is an emergency collection, based on whether
        // the last collection was exhaustive and whether the heap can still grow.
        let is_emergency = mmtk.state.set_collection_kind(
            mmtk.get_plan().last_collection_was_exhaustive(),
            mmtk.gc_trigger.policy.can_heap_size_grow(),
        );
        if is_emergency {
            mmtk.get_plan().notify_emergency_collection();
        }
        mmtk.set_gc_status(GcStatus::GcPrepare);

        // The plan schedules all remaining work packets for this GC.
        mmtk.get_plan().schedule_collection(worker.scheduler());
    }
}
35
/// The global preparation work packet: prepares the plan, then fans out
/// per-mutator [`PrepareMutator`] packets and per-worker [`PrepareCollector`]
/// designated work.
pub struct Prepare<C: GCWorkContext> {
    // Raw pointer to the plan; dereferenced mutably in `do_work` (see the
    // NOTE there about why that is expected to be sound).
    pub plan: *const C::PlanType,
}

// SAFETY: the raw pointer refers to the plan owned by the 'static MMTK
// instance, and is only dereferenced on a GC worker thread during the
// Prepare stage -- NOTE(review): confirm this invariant against the scheduler.
unsafe impl<C: GCWorkContext> Send for Prepare<C> {}

impl<C: GCWorkContext> Prepare<C> {
    /// Create a `Prepare` packet for the given plan.
    pub fn new(plan: *const C::PlanType) -> Self {
        Self { plan }
    }
}

impl<C: GCWorkContext> GCWork<C::VM> for Prepare<C> {
    fn do_work(&mut self, worker: &mut GCWorker<C::VM>, mmtk: &'static MMTK<C::VM>) {
        trace!("Prepare Global");
        // NOTE(review): casts away const to call `prepare(&mut self)`;
        // presumably sound because this packet runs exclusively while
        // mutators are stopped -- confirm against the stage ordering.
        let plan_mut: &mut C::PlanType = unsafe { &mut *(self.plan as *const _ as *mut _) };
        plan_mut.prepare(worker.tls);

        if plan_mut.constraints().needs_prepare_mutator {
            // One PrepareMutator packet per mutator thread.
            let prepare_mutator_packets = <C::VM as VMBinding>::VMActivePlan::mutators()
                .map(|mutator| Box::new(PrepareMutator::<C::VM>::new(mutator)) as _)
                .collect::<Vec<_>>();
            debug_assert_eq!(
                prepare_mutator_packets.len(),
                <C::VM as VMBinding>::VMActivePlan::number_of_mutators()
            );
            mmtk.scheduler.work_buckets[WorkBucketStage::Prepare].bulk_add(prepare_mutator_packets);
        }

        // Every worker must prepare its own copy context, so PrepareCollector
        // is pushed as designated work for each worker rather than into a
        // shared bucket.
        for w in &mmtk.scheduler.worker_group.workers_shared {
            let result = w.designated_work.push(Box::new(PrepareCollector));
            debug_assert!(result.is_ok());
        }
    }
}
80
/// Per-mutator preparation work: calls `Mutator::prepare` for one mutator.
pub struct PrepareMutator<VM: VMBinding> {
    /// The mutator to prepare.
    pub mutator: &'static mut Mutator<VM>,
}

impl<VM: VMBinding> PrepareMutator<VM> {
    /// Create a packet that prepares `mutator`.
    pub fn new(mutator: &'static mut Mutator<VM>) -> Self {
        Self { mutator }
    }
}

impl<VM: VMBinding> GCWork<VM> for PrepareMutator<VM> {
    fn do_work(&mut self, worker: &mut GCWorker<VM>, _mmtk: &'static MMTK<VM>) {
        trace!("Prepare Mutator");
        self.mutator.prepare(worker.tls);
    }
}
100
/// Per-worker preparation, run as designated work on every GC worker:
/// prepares the worker's copy context and lets the plan prepare the worker.
#[derive(Default)]
pub struct PrepareCollector;

impl<VM: VMBinding> GCWork<VM> for PrepareCollector {
    fn do_work(&mut self, worker: &mut GCWorker<VM>, mmtk: &'static MMTK<VM>) {
        trace!("Prepare Collector");
        worker.get_copy_context_mut().prepare();
        mmtk.get_plan().prepare_worker(worker);
    }
}
112
/// The global release work packet: releases the plan, then fans out
/// per-mutator [`ReleaseMutator`] packets and per-worker [`ReleaseCollector`]
/// designated work. Mirrors [`Prepare`].
pub struct Release<C: GCWorkContext> {
    // Raw pointer to the plan; dereferenced mutably in `do_work` (see the
    // NOTE there about why that is expected to be sound).
    pub plan: *const C::PlanType,
}

impl<C: GCWorkContext> Release<C> {
    /// Create a `Release` packet for the given plan.
    pub fn new(plan: *const C::PlanType) -> Self {
        Self { plan }
    }
}

// SAFETY: the raw pointer refers to the plan owned by the 'static MMTK
// instance, and is only dereferenced on a GC worker thread during the
// Release stage -- NOTE(review): confirm this invariant against the scheduler.
unsafe impl<C: GCWorkContext> Send for Release<C> {}

impl<C: GCWorkContext + 'static> GCWork<C::VM> for Release<C> {
    fn do_work(&mut self, worker: &mut GCWorker<C::VM>, mmtk: &'static MMTK<C::VM>) {
        trace!("Release Global");

        // Let the GC-trigger policy observe the release phase.
        mmtk.gc_trigger.policy.on_gc_release(mmtk);
        // NOTE(review): const-to-mut cast; presumably sound because this
        // packet runs exclusively during the Release stage -- confirm.
        let plan_mut: &mut C::PlanType = unsafe { &mut *(self.plan as *const _ as *mut _) };
        plan_mut.release(worker.tls);

        // One ReleaseMutator packet per mutator thread.
        let release_mutator_packets = <C::VM as VMBinding>::VMActivePlan::mutators()
            .map(|mutator| Box::new(ReleaseMutator::<C::VM>::new(mutator)) as _)
            .collect::<Vec<_>>();
        debug_assert_eq!(
            release_mutator_packets.len(),
            <C::VM as VMBinding>::VMActivePlan::number_of_mutators()
        );
        mmtk.scheduler.work_buckets[WorkBucketStage::Release].bulk_add(release_mutator_packets);

        // Every worker must release its own copy context, so ReleaseCollector
        // is pushed as designated work for each worker.
        for w in &mmtk.scheduler.worker_group.workers_shared {
            let result = w.designated_work.push(Box::new(ReleaseCollector));
            debug_assert!(result.is_ok());
        }
    }
}
158
/// Per-mutator release work: calls `Mutator::release` for one mutator.
pub struct ReleaseMutator<VM: VMBinding> {
    /// The mutator to release.
    pub mutator: &'static mut Mutator<VM>,
}

impl<VM: VMBinding> ReleaseMutator<VM> {
    /// Create a packet that releases `mutator`.
    pub fn new(mutator: &'static mut Mutator<VM>) -> Self {
        Self { mutator }
    }
}

impl<VM: VMBinding> GCWork<VM> for ReleaseMutator<VM> {
    fn do_work(&mut self, worker: &mut GCWorker<VM>, _mmtk: &'static MMTK<VM>) {
        trace!("Release Mutator");
        self.mutator.release(worker.tls);
    }
}
178
/// Per-worker release, run as designated work on every GC worker:
/// releases the worker's copy context.
#[derive(Default)]
pub struct ReleaseCollector;

impl<VM: VMBinding> GCWork<VM> for ReleaseCollector {
    fn do_work(&mut self, worker: &mut GCWorker<VM>, _mmtk: &'static MMTK<VM>) {
        trace!("Release Collector");
        worker.get_copy_context_mut().release();
    }
}
189
/// Stops all mutator threads and, unless configured otherwise, schedules a
/// [`ScanMutatorRoots`] packet for each stopped mutator plus one
/// [`ScanVMSpecificRoots`] packet.
#[derive(Default)]
pub struct StopMutators<C: GCWorkContext> {
    // When true, do not schedule ScanMutatorRoots for the stopped mutators.
    skip_mutator_roots: bool,
    // When true, flush each mutator's local state as it is stopped.
    flush_mutator: bool,
    phantom: PhantomData<C>,
}

impl<C: GCWorkContext> StopMutators<C> {
    /// Stop mutators and scan their roots (the normal case).
    pub fn new() -> Self {
        Self {
            skip_mutator_roots: false,
            flush_mutator: false,
            phantom: PhantomData,
        }
    }

    /// Stop mutators, flush them, but do not scan mutator roots.
    pub fn new_no_scan_roots() -> Self {
        Self {
            skip_mutator_roots: true,
            flush_mutator: true,
            phantom: PhantomData,
        }
    }
}

impl<C: GCWorkContext> GCWork<C::VM> for StopMutators<C> {
    fn do_work(&mut self, worker: &mut GCWorker<C::VM>, mmtk: &'static MMTK<C::VM>) {
        trace!("stop_all_mutators start");
        mmtk.state.prepare_for_stack_scanning();
        // The VM invokes the closure once per mutator as it is stopped.
        <C::VM as VMBinding>::VMCollection::stop_all_mutators(worker.tls, |mutator| {
            if self.flush_mutator {
                mutator.flush();
            }
            if !self.skip_mutator_roots {
                mmtk.scheduler.work_buckets[WorkBucketStage::Prepare]
                    .add(ScanMutatorRoots::<C>(mutator));
            }
        });
        trace!("stop_all_mutators end");
        // All mutators are now paused; notify the plan and the scheduler.
        mmtk.get_plan().notify_mutators_paused(&mmtk.scheduler);
        mmtk.scheduler.notify_mutators_paused(mmtk);
        mmtk.scheduler.work_buckets[WorkBucketStage::Prepare].add(ScanVMSpecificRoots::<C>::new());
    }
}
244
/// An `ObjectTracer` backed by a `ProcessEdgesWork` instance. Traced objects
/// accumulate in the inner packet's node buffer; when the buffer fills, a
/// scan work packet is created and added to `stage`.
pub(crate) struct ProcessEdgesWorkTracer<E: ProcessEdgesWork> {
    // The underlying packet that performs the actual tracing.
    process_edges_work: E,
    // The bucket stage that derived scan work is added to.
    stage: WorkBucketStage,
}

impl<E: ProcessEdgesWork> ObjectTracer for ProcessEdgesWorkTracer<E> {
    /// Trace one object via the inner packet, flushing the node buffer if
    /// it has reached capacity.
    fn trace_object(&mut self, object: ObjectReference) -> ObjectReference {
        let result = self.process_edges_work.trace_object(object);
        self.flush_if_full();
        result
    }
}

impl<E: ProcessEdgesWork> ProcessEdgesWorkTracer<E> {
    // Flush only when the node buffer has reached capacity.
    fn flush_if_full(&mut self) {
        if self.process_edges_work.nodes.is_full() {
            self.flush();
        }
    }

    /// Flush any remaining buffered nodes (called when tracing finishes).
    pub fn flush_if_not_empty(&mut self) {
        if !self.process_edges_work.nodes.is_empty() {
            self.flush();
        }
    }

    // Precondition: the node buffer is non-empty (both callers check).
    fn flush(&mut self) {
        let next_nodes = self.process_edges_work.pop_nodes();
        assert!(!next_nodes.is_empty());
        if let Some(work_packet) = self.process_edges_work.create_scan_work(next_nodes) {
            let worker = self.process_edges_work.worker();
            worker.scheduler().work_buckets[self.stage].add(work_packet);
        }
    }
}
284
/// An `ObjectTracerContext` that lazily creates a [`ProcessEdgesWorkTracer`]
/// (and the `ProcessEdgesWork` inside it) when tracing is needed, dispatching
/// any derived scan work to `stage`.
pub(crate) struct ProcessEdgesWorkTracerContext<E: ProcessEdgesWork> {
    // The bucket stage for work created while tracing.
    stage: WorkBucketStage,
    phantom_data: PhantomData<E>,
}
293
294impl<E: ProcessEdgesWork> Clone for ProcessEdgesWorkTracerContext<E> {
295 fn clone(&self) -> Self {
296 Self { ..*self }
297 }
298}
299
impl<E: ProcessEdgesWork> ObjectTracerContext<E::VM> for ProcessEdgesWorkTracerContext<E> {
    type TracerType = ProcessEdgesWorkTracer<E>;

    /// Build a tracer around a fresh (slot-less, non-root) `ProcessEdgesWork`,
    /// run `func` with it, then flush any nodes it buffered.
    fn with_tracer<R, F>(&self, worker: &mut GCWorker<E::VM>, func: F) -> R
    where
        F: FnOnce(&mut Self::TracerType) -> R,
    {
        let mmtk = worker.mmtk;

        // Empty slot list: this packet is only used for its trace_object /
        // node buffer, never for processing slots.
        let mut process_edges_work = E::new(vec![], false, mmtk, self.stage);
        // The worker pointer must be set before trace_object can be called.
        process_edges_work.set_worker(worker);

        let mut tracer = ProcessEdgesWorkTracer {
            process_edges_work,
            stage: self.stage,
        };

        let result = func(&mut tracer);

        // Hand any leftover traced objects to a scan work packet.
        tracer.flush_if_not_empty();

        result
    }
}
330
/// Lets the VM binding process its weak references (in the `VMRefClosure`
/// stage). If the binding asks to repeat, this packet re-installs itself as
/// the stage's sentinel so it runs again after the stage drains.
pub struct VMProcessWeakRefs<E: ProcessEdgesWork> {
    phantom_data: PhantomData<E>,
}

impl<E: ProcessEdgesWork> VMProcessWeakRefs<E> {
    /// Create a new weak-reference-processing packet.
    pub fn new() -> Self {
        Self {
            phantom_data: PhantomData,
        }
    }
}

impl<E: ProcessEdgesWork> GCWork<E::VM> for VMProcessWeakRefs<E> {
    fn do_work(&mut self, worker: &mut GCWorker<E::VM>, _mmtk: &'static MMTK<E::VM>) {
        trace!("VMProcessWeakRefs");

        let stage = WorkBucketStage::VMRefClosure;

        // The binding does its weak-ref processing with a tracer that
        // dispatches derived scan work into the same stage.
        let need_to_repeat = {
            let tracer_factory = ProcessEdgesWorkTracerContext::<E> {
                stage,
                phantom_data: PhantomData,
            };
            <E::VM as VMBinding>::VMScanning::process_weak_refs(worker, tracer_factory)
        };

        if need_to_repeat {
            // Re-run after the current stage's packets drain, by installing
            // a fresh copy of this packet as the stage sentinel.
            let new_self = Box::new(Self::new());

            worker.scheduler().work_buckets[stage].set_sentinel(new_self);
        }
    }
}
373
/// Lets the VM binding forward its weak references (in the
/// `VMRefForwarding` stage), used by plans that move objects after the
/// transitive closure.
pub struct VMForwardWeakRefs<E: ProcessEdgesWork> {
    phantom_data: PhantomData<E>,
}

impl<E: ProcessEdgesWork> VMForwardWeakRefs<E> {
    /// Create a new weak-reference-forwarding packet.
    pub fn new() -> Self {
        Self {
            phantom_data: PhantomData,
        }
    }
}

impl<E: ProcessEdgesWork> GCWork<E::VM> for VMForwardWeakRefs<E> {
    fn do_work(&mut self, worker: &mut GCWorker<E::VM>, _mmtk: &'static MMTK<E::VM>) {
        trace!("VMForwardWeakRefs");

        let stage = WorkBucketStage::VMRefForwarding;

        // Derived scan work goes into the forwarding stage.
        let tracer_factory = ProcessEdgesWorkTracerContext::<E> {
            stage,
            phantom_data: PhantomData,
        };
        <E::VM as VMBinding>::VMScanning::forward_weak_refs(worker, tracer_factory)
    }
}
406
/// Notifies the VM binding that object forwarding has finished, so it can
/// perform any post-forwarding clean-up.
#[derive(Default)]
pub struct VMPostForwarding<VM: VMBinding> {
    phantom_data: PhantomData<VM>,
}

impl<VM: VMBinding> GCWork<VM> for VMPostForwarding<VM> {
    fn do_work(&mut self, worker: &mut GCWorker<VM>, _mmtk: &'static MMTK<VM>) {
        trace!("VMPostForwarding start");
        <VM as VMBinding>::VMCollection::post_forwarding(worker.tls);
        trace!("VMPostForwarding end");
    }
}
425
/// Scans the roots of one stopped mutator. The packet that observes the last
/// stack scanned notifies the VM and flips the GC status to `GcProper`.
pub struct ScanMutatorRoots<C: GCWorkContext>(pub &'static mut Mutator<C::VM>);

impl<C: GCWorkContext> GCWork<C::VM> for ScanMutatorRoots<C> {
    fn do_work(&mut self, worker: &mut GCWorker<C::VM>, mmtk: &'static MMTK<C::VM>) {
        trace!("ScanMutatorRoots for mutator {:?}", self.0.get_tls());
        let mutators = <C::VM as VMBinding>::VMActivePlan::number_of_mutators();
        // Roots found by the binding are turned into work packets by this
        // factory (normal, pinning and transitively-pinning roots).
        let factory = ProcessEdgesWorkRootsWorkFactory::<
            C::VM,
            C::DefaultProcessEdges,
            C::PinningProcessEdges,
        >::new(mmtk);
        <C::VM as VMBinding>::VMScanning::scan_roots_in_mutator_thread(
            worker.tls,
            // NOTE(review): creates a second &mut alias of the mutator for
            // the duration of the call; presumably the binding does not
            // retain it -- confirm against the Scanning trait's contract.
            unsafe { &mut *(self.0 as *mut _) },
            factory,
        );
        self.0.flush();

        // True only for the packet that completes the last mutator's scan.
        if mmtk.state.inform_stack_scanned(mutators) {
            <C::VM as VMBinding>::VMScanning::notify_initial_thread_scan_complete(
                false, worker.tls,
            );
            mmtk.set_gc_status(GcStatus::GcProper);
        }
    }
}
452
/// Scans VM-specific (global, non-mutator) roots via the binding.
#[derive(Default)]
pub struct ScanVMSpecificRoots<C: GCWorkContext>(PhantomData<C>);

impl<C: GCWorkContext> ScanVMSpecificRoots<C> {
    /// Create a new VM-specific root-scanning packet.
    pub fn new() -> Self {
        Self(PhantomData)
    }
}

impl<C: GCWorkContext> GCWork<C::VM> for ScanVMSpecificRoots<C> {
    fn do_work(&mut self, worker: &mut GCWorker<C::VM>, mmtk: &'static MMTK<C::VM>) {
        trace!("ScanStaticRoots");
        // Same factory as mutator-root scanning: roots reported by the
        // binding become ProcessEdgesWork / ProcessRootNodes packets.
        let factory = ProcessEdgesWorkRootsWorkFactory::<
            C::VM,
            C::DefaultProcessEdges,
            C::PinningProcessEdges,
        >::new(mmtk);
        <C::VM as VMBinding>::VMScanning::scan_vm_specific_roots(worker.tls, factory);
    }
}
473
/// Common state embedded in every `ProcessEdgesWork` implementation: the slot
/// batch to process, the queue of newly traced objects, and bookkeeping.
pub struct ProcessEdgesBase<VM: VMBinding> {
    /// The batch of slots this packet processes.
    pub slots: Vec<VM::VMSlot>,
    /// Objects newly reached by `trace_object`, awaiting scanning.
    pub nodes: VectorObjectQueue,
    mmtk: &'static MMTK<VM>,
    // Set via `set_worker` when the packet starts executing; null until then.
    worker: *mut GCWorker<VM>,
    /// Whether the slots are root slots.
    pub roots: bool,
    /// The bucket stage derived work should be added to.
    pub bucket: WorkBucketStage,
}

// SAFETY: the raw worker pointer is only set and dereferenced on the worker
// thread executing the packet -- NOTE(review): confirm this is the intended
// invariant; the pointer itself is what makes the type non-auto-Send.
unsafe impl<VM: VMBinding> Send for ProcessEdgesBase<VM> {}

impl<VM: VMBinding> ProcessEdgesBase<VM> {
    /// Create the base state for a packet over `slots`.
    pub fn new(
        slots: Vec<VM::VMSlot>,
        roots: bool,
        mmtk: &'static MMTK<VM>,
        bucket: WorkBucketStage,
    ) -> Self {
        // Under extreme assertions, record every slot so duplicates across
        // packets can be detected.
        #[cfg(feature = "extreme_assertions")]
        if crate::util::slot_logger::should_check_duplicate_slots(mmtk.get_plan()) {
            for slot in &slots {
                mmtk.slot_logger.log_slot(*slot);
            }
        }
        Self {
            slots,
            nodes: VectorObjectQueue::new(),
            mmtk,
            worker: std::ptr::null_mut(),
            roots,
            bucket,
        }
    }
    /// Record the worker executing this packet. Must be called before
    /// `worker()` is used.
    pub fn set_worker(&mut self, worker: &mut GCWorker<VM>) {
        self.worker = worker;
    }

    /// The worker executing this packet. Unsafe to call before `set_worker`
    /// (the pointer starts out null).
    pub fn worker(&self) -> &'static mut GCWorker<VM> {
        unsafe { &mut *self.worker }
    }

    /// The MMTK instance this packet belongs to.
    pub fn mmtk(&self) -> &'static MMTK<VM> {
        self.mmtk
    }

    /// The current plan, as a trait object.
    pub fn plan(&self) -> &'static dyn Plan<VM = VM> {
        self.mmtk.get_plan()
    }

    /// Take all buffered nodes, leaving the queue empty.
    pub fn pop_nodes(&mut self) -> Vec<ObjectReference> {
        self.nodes.take()
    }

    /// Whether this packet processes root slots.
    pub fn is_roots(&self) -> bool {
        self.roots
    }
}
537
/// Shorthand for the slot type of a `ProcessEdgesWork` implementation's VM.
pub type SlotOf<E> = <<E as ProcessEdgesWork>::VM as VMBinding>::VMSlot;
540
541pub trait ProcessEdgesWork:
564 Send + 'static + Sized + DerefMut + Deref<Target = ProcessEdgesBase<Self::VM>>
565{
566 type VM: VMBinding;
568
569 type ScanObjectsWorkType: ScanObjectsWork<Self::VM>;
571
572 const CAPACITY: usize = EDGES_WORK_BUFFER_SIZE;
579 const OVERWRITE_REFERENCE: bool = true;
581 const SCAN_OBJECTS_IMMEDIATELY: bool = true;
584
585 fn new(
593 slots: Vec<SlotOf<Self>>,
594 roots: bool,
595 mmtk: &'static MMTK<Self::VM>,
596 bucket: WorkBucketStage,
597 ) -> Self;
598
599 fn trace_object(&mut self, object: ObjectReference) -> ObjectReference;
604
605 #[cfg(feature = "sanity")]
608 fn cache_roots_for_sanity_gc(&mut self) {
609 assert!(self.roots);
610 self.mmtk()
611 .sanity_checker
612 .lock()
613 .unwrap()
614 .add_root_slots(self.slots.clone());
615 }
616
617 fn start_or_dispatch_scan_work(&mut self, mut work_packet: impl GCWork<Self::VM>) {
620 if Self::SCAN_OBJECTS_IMMEDIATELY {
621 work_packet.do_work(self.worker(), self.mmtk);
627 } else {
628 debug_assert!(self.bucket != WorkBucketStage::Unconstrained);
629 self.mmtk.scheduler.work_buckets[self.bucket].add(work_packet);
630 }
631 }
632
633 fn create_scan_work(&self, nodes: Vec<ObjectReference>) -> Option<Self::ScanObjectsWorkType>;
644
645 fn flush(&mut self) {
648 let nodes = self.pop_nodes();
649 if !nodes.is_empty() {
650 if let Some(work_packet) = self.create_scan_work(nodes.clone()) {
651 self.start_or_dispatch_scan_work(work_packet);
652 }
653 }
654 }
655
656 fn process_slot(&mut self, slot: SlotOf<Self>) {
659 let Some(object) = slot.load() else {
660 return;
662 };
663 let new_object = self.trace_object(object);
664 if Self::OVERWRITE_REFERENCE && new_object != object {
665 slot.store(new_object);
666 }
667 }
668
669 fn process_slots(&mut self) {
671 probe!(mmtk, process_slots, self.slots.len(), self.is_roots());
672 for i in 0..self.slots.len() {
673 self.process_slot(self.slots[i])
674 }
675 }
676}
677
/// Blanket `GCWork` impl: every `ProcessEdgesWork` is itself a schedulable
/// work packet that processes its slots and flushes any traced nodes.
impl<E: ProcessEdgesWork> GCWork<E::VM> for E {
    fn do_work(&mut self, worker: &mut GCWorker<E::VM>, _mmtk: &'static MMTK<E::VM>) {
        // The worker must be recorded before any trace_object call.
        self.set_worker(worker);
        self.process_slots();
        if !self.nodes.is_empty() {
            self.flush();
        }
        // Cache root slots for a later sanity GC, unless we are already
        // inside the sanity GC itself.
        #[cfg(feature = "sanity")]
        if self.roots && !_mmtk.is_in_sanity() {
            self.cache_roots_for_sanity_gc();
        }
        trace!("ProcessEdgesWork End");
    }
}
692
/// A `ProcessEdgesWork` implementation that dispatches tracing through the
/// space function table (SFT) of the space containing each object, instead
/// of going through the plan.
#[allow(dead_code)]
pub struct SFTProcessEdges<VM: VMBinding> {
    pub base: ProcessEdgesBase<VM>,
}

impl<VM: VMBinding> ProcessEdgesWork for SFTProcessEdges<VM> {
    type VM = VM;
    type ScanObjectsWorkType = ScanObjects<Self>;

    fn new(
        slots: Vec<SlotOf<Self>>,
        roots: bool,
        mmtk: &'static MMTK<VM>,
        bucket: WorkBucketStage,
    ) -> Self {
        let base = ProcessEdgesBase::new(slots, roots, mmtk, bucket);
        Self { base }
    }

    fn trace_object(&mut self, object: ObjectReference) -> ObjectReference {
        use crate::policy::sft::GCWorkerMutRef;

        let worker = GCWorkerMutRef::new(self.worker());

        // SAFETY-NOTE(review): unchecked SFT lookup; presumably valid because
        // `object` is a live reference inside a mapped space -- confirm.
        let sft = unsafe { crate::mmtk::SFT_MAP.get_unchecked(object.to_raw_address()) };
        sft.sft_trace_object(&mut self.base.nodes, object, worker)
    }

    fn create_scan_work(&self, nodes: Vec<ObjectReference>) -> Option<ScanObjects<Self>> {
        Some(ScanObjects::<Self>::new(nodes, false, self.bucket))
    }
}
736
/// A `RootsWorkFactory` that turns roots reported by the VM binding into
/// work packets: `DPE` processes ordinary (movable) root slots, while `PPE`
/// traces pinning / transitively-pinning root nodes.
pub(crate) struct ProcessEdgesWorkRootsWorkFactory<
    VM: VMBinding,
    DPE: ProcessEdgesWork<VM = VM>,
    PPE: ProcessEdgesWork<VM = VM>,
> {
    mmtk: &'static MMTK<VM>,
    phantom: PhantomData<(DPE, PPE)>,
}
748
749impl<VM: VMBinding, DPE: ProcessEdgesWork<VM = VM>, PPE: ProcessEdgesWork<VM = VM>> Clone
750 for ProcessEdgesWorkRootsWorkFactory<VM, DPE, PPE>
751{
752 fn clone(&self) -> Self {
753 Self {
754 mmtk: self.mmtk,
755 phantom: PhantomData,
756 }
757 }
758}
759
/// Discriminants passed as the first argument of the `roots` USDT
/// tracepoint (see the `probe!` calls below). `repr(usize)` fixes the
/// values seen by external tracing tools.
#[repr(usize)]
enum RootsKind {
    NORMAL = 0,
    PINNING = 1,
    TPINNING = 2,
}
768
impl<VM: VMBinding, DPE: ProcessEdgesWork<VM = VM>, PPE: ProcessEdgesWork<VM = VM>>
    RootsWorkFactory<VM::VMSlot> for ProcessEdgesWorkRootsWorkFactory<VM, DPE, PPE>
{
    /// Ordinary root slots: processed by `DPE` in the Closure stage.
    fn create_process_roots_work(&mut self, slots: Vec<VM::VMSlot>) {
        probe!(mmtk, roots, RootsKind::NORMAL, slots.len());
        crate::memory_manager::add_work_packet(
            self.mmtk,
            WorkBucketStage::Closure,
            DPE::new(slots, true, self.mmtk, WorkBucketStage::Closure),
        );
    }

    /// Pinning root nodes: traced by `PPE` (must not move), then their
    /// children are closed over by `DPE` in the Closure stage.
    fn create_process_pinning_roots_work(&mut self, nodes: Vec<ObjectReference>) {
        probe!(mmtk, roots, RootsKind::PINNING, nodes.len());
        crate::memory_manager::add_work_packet(
            self.mmtk,
            WorkBucketStage::PinningRootsTrace,
            ProcessRootNodes::<VM, PPE, DPE>::new(nodes, WorkBucketStage::Closure),
        );
    }

    /// Transitively-pinning root nodes: `PPE` is used for both the root
    /// trace and the subsequent closure, so nothing reachable moves.
    fn create_process_tpinning_roots_work(&mut self, nodes: Vec<ObjectReference>) {
        probe!(mmtk, roots, RootsKind::TPINNING, nodes.len());
        crate::memory_manager::add_work_packet(
            self.mmtk,
            WorkBucketStage::TPinningClosure,
            ProcessRootNodes::<VM, PPE, PPE>::new(nodes, WorkBucketStage::TPinningClosure),
        );
    }
}
809
impl<VM: VMBinding, DPE: ProcessEdgesWork<VM = VM>, PPE: ProcessEdgesWork<VM = VM>>
    ProcessEdgesWorkRootsWorkFactory<VM, DPE, PPE>
{
    /// Create a factory bound to `mmtk`.
    fn new(mmtk: &'static MMTK<VM>) -> Self {
        Self {
            mmtk,
            phantom: PhantomData,
        }
    }
}
820
// Deref to the embedded base, as required by the ProcessEdgesWork trait.
impl<VM: VMBinding> Deref for SFTProcessEdges<VM> {
    type Target = ProcessEdgesBase<VM>;
    fn deref(&self) -> &Self::Target {
        &self.base
    }
}

impl<VM: VMBinding> DerefMut for SFTProcessEdges<VM> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.base
    }
}
833
/// Shared implementation for work packets that scan a buffer of objects,
/// enqueuing the slots (or tracing the edges directly) of each object.
pub trait ScanObjectsWork<VM: VMBinding>: GCWork<VM> + Sized {
    /// The `ProcessEdgesWork` type used to trace edges found while scanning.
    type E: ProcessEdgesWork<VM = VM>;

    /// Hook invoked after each object has been scanned.
    fn post_scan_object(&self, object: ObjectReference);

    /// The bucket stage derived work should be added to.
    fn get_bucket(&self) -> WorkBucketStage;

    /// Scan every object in `buffer`. Objects that support slot enqueuing
    /// are scanned with an `ObjectsClosure`; the rest are deferred and
    /// scanned via `scan_object_and_trace_edges` with an object tracer.
    fn do_work_common(
        &self,
        buffer: &[ObjectReference],
        worker: &mut GCWorker<<Self::E as ProcessEdgesWork>::VM>,
        mmtk: &'static MMTK<<Self::E as ProcessEdgesWork>::VM>,
    ) {
        let tls = worker.tls;

        let objects_to_scan = buffer;

        // Objects whose VM does not support slot enqueuing; handled below.
        let mut scan_later = vec![];
        {
            let mut closure = ObjectsClosure::<Self::E>::new(worker, self.get_bucket());

            // Optional live-bytes accounting, gated by a runtime option.
            if crate::util::rust_util::unlikely(*mmtk.get_options().count_live_bytes_in_gc) {
                let mut live_bytes_stats = closure.worker.shared.live_bytes_per_space.borrow_mut();
                for object in objects_to_scan.iter().copied() {
                    crate::scheduler::worker::GCWorkerShared::<VM>::increase_live_bytes(
                        &mut live_bytes_stats,
                        object,
                    );
                }
            }

            for object in objects_to_scan.iter().copied() {
                if <VM as VMBinding>::VMScanning::support_slot_enqueuing(tls, object) {
                    trace!("Scan object (slot) {}", object);
                    <VM as VMBinding>::VMScanning::scan_object(tls, object, &mut closure);
                    self.post_scan_object(object);
                } else {
                    scan_later.push(object);
                }
            }
        }
        // `closure` is dropped here, flushing any slots it buffered, and
        // releasing its borrow of `worker` before the tracer below needs it.

        let total_objects = objects_to_scan.len();
        let scan_and_trace = scan_later.len();
        probe!(mmtk, scan_objects, total_objects, scan_and_trace);

        // Second phase: objects that must be scanned-and-traced in one step.
        if !scan_later.is_empty() {
            let object_tracer_context = ProcessEdgesWorkTracerContext::<Self::E> {
                stage: self.get_bucket(),
                phantom_data: PhantomData,
            };

            object_tracer_context.with_tracer(worker, |object_tracer| {
                for object in scan_later.iter().copied() {
                    trace!("Scan object (node) {}", object);
                    <VM as VMBinding>::VMScanning::scan_object_and_trace_edges(
                        tls,
                        object,
                        object_tracer,
                    );
                    self.post_scan_object(object);
                }
            });
        }
    }
}
919
/// A work packet that scans a buffer of objects, enqueuing their slots via
/// the `Edges` `ProcessEdgesWork` type.
pub struct ScanObjects<Edges: ProcessEdgesWork> {
    // The objects to scan.
    buffer: Vec<ObjectReference>,
    #[allow(unused)]
    concurrent: bool,
    phantom: PhantomData<Edges>,
    // The bucket stage derived work is added to.
    bucket: WorkBucketStage,
}

impl<Edges: ProcessEdgesWork> ScanObjects<Edges> {
    /// Create a packet that scans `buffer`, dispatching derived work to `bucket`.
    pub fn new(buffer: Vec<ObjectReference>, concurrent: bool, bucket: WorkBucketStage) -> Self {
        Self {
            buffer,
            concurrent,
            phantom: PhantomData,
            bucket,
        }
    }
}
945
impl<VM: VMBinding, E: ProcessEdgesWork<VM = VM>> ScanObjectsWork<VM> for ScanObjects<E> {
    type E = E;

    fn get_bucket(&self) -> WorkBucketStage {
        self.bucket
    }

    // The generic ScanObjects packet has no per-object post-scan hook.
    fn post_scan_object(&self, _object: ObjectReference) {
    }
}

impl<E: ProcessEdgesWork> GCWork<E::VM> for ScanObjects<E> {
    fn do_work(&mut self, worker: &mut GCWorker<E::VM>, mmtk: &'static MMTK<E::VM>) {
        trace!("ScanObjects");
        self.do_work_common(&self.buffer, worker, mmtk);
        trace!("ScanObjects End");
    }
}
965
966use crate::mmtk::MMTK;
967use crate::plan::Plan;
968use crate::plan::PlanTraceObject;
969use crate::policy::gc_work::TraceKind;
970
/// A `ProcessEdgesWork` implementation that dispatches tracing through the
/// plan's `PlanTraceObject` implementation, specialised by the const
/// `KIND` trace kind.
pub struct PlanProcessEdges<
    VM: VMBinding,
    P: Plan<VM = VM> + PlanTraceObject<VM>,
    const KIND: TraceKind,
> {
    plan: &'static P,
    base: ProcessEdgesBase<VM>,
}

impl<VM: VMBinding, P: PlanTraceObject<VM> + Plan<VM = VM>, const KIND: TraceKind> ProcessEdgesWork
    for PlanProcessEdges<VM, P, KIND>
{
    type VM = VM;
    type ScanObjectsWorkType = PlanScanObjects<Self, P>;

    fn new(
        slots: Vec<SlotOf<Self>>,
        roots: bool,
        mmtk: &'static MMTK<VM>,
        bucket: WorkBucketStage,
    ) -> Self {
        let base = ProcessEdgesBase::new(slots, roots, mmtk, bucket);
        // Downcast the dyn Plan back to the concrete plan type P.
        let plan = base.plan().downcast_ref::<P>().unwrap();
        Self { plan, base }
    }

    fn create_scan_work(&self, nodes: Vec<ObjectReference>) -> Option<Self::ScanObjectsWorkType> {
        Some(PlanScanObjects::<Self, P>::new(
            self.plan,
            nodes,
            false,
            self.bucket,
        ))
    }

    fn trace_object(&mut self, object: ObjectReference) -> ObjectReference {
        let worker = self.worker();
        self.plan
            .trace_object::<VectorObjectQueue, KIND>(&mut self.base.nodes, object, worker)
    }

    /// Like the default implementation, but the write-back is gated on
    /// whether this trace kind can move objects at all, which is known
    /// statically from the plan.
    fn process_slot(&mut self, slot: SlotOf<Self>) {
        let Some(object) = slot.load() else {
            // Skip slots that are not holding an object reference.
            return;
        };
        let new_object = self.trace_object(object);
        if P::may_move_objects::<KIND>() && new_object != object {
            slot.store(new_object);
        }
    }
}
1026
// Deref to the embedded base, as required by the ProcessEdgesWork trait.
impl<VM: VMBinding, P: PlanTraceObject<VM> + Plan<VM = VM>, const KIND: TraceKind> Deref
    for PlanProcessEdges<VM, P, KIND>
{
    type Target = ProcessEdgesBase<VM>;
    fn deref(&self) -> &Self::Target {
        &self.base
    }
}

impl<VM: VMBinding, P: PlanTraceObject<VM> + Plan<VM = VM>, const KIND: TraceKind> DerefMut
    for PlanProcessEdges<VM, P, KIND>
{
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.base
    }
}
1044
/// Plan-aware variant of [`ScanObjects`]: additionally calls the plan's
/// `post_scan_object` hook after each object is scanned.
pub struct PlanScanObjects<E: ProcessEdgesWork, P: Plan<VM = E::VM> + PlanTraceObject<E::VM>> {
    plan: &'static P,
    // The objects to scan.
    buffer: Vec<ObjectReference>,
    #[allow(dead_code)]
    concurrent: bool,
    phantom: PhantomData<E>,
    // The bucket stage derived work is added to.
    bucket: WorkBucketStage,
}

impl<E: ProcessEdgesWork, P: Plan<VM = E::VM> + PlanTraceObject<E::VM>> PlanScanObjects<E, P> {
    /// Create a packet that scans `buffer` for `plan`, dispatching derived
    /// work to `bucket`.
    pub fn new(
        plan: &'static P,
        buffer: Vec<ObjectReference>,
        concurrent: bool,
        bucket: WorkBucketStage,
    ) -> Self {
        Self {
            plan,
            buffer,
            concurrent,
            phantom: PhantomData,
            bucket,
        }
    }
}
1072
impl<E: ProcessEdgesWork, P: Plan<VM = E::VM> + PlanTraceObject<E::VM>> ScanObjectsWork<E::VM>
    for PlanScanObjects<E, P>
{
    type E = E;

    fn get_bucket(&self) -> WorkBucketStage {
        self.bucket
    }

    // Forward the post-scan hook to the plan.
    fn post_scan_object(&self, object: ObjectReference) {
        self.plan.post_scan_object(object);
    }
}

impl<E: ProcessEdgesWork, P: Plan<VM = E::VM> + PlanTraceObject<E::VM>> GCWork<E::VM>
    for PlanScanObjects<E, P>
{
    fn do_work(&mut self, worker: &mut GCWorker<E::VM>, mmtk: &'static MMTK<E::VM>) {
        trace!("PlanScanObjects");
        self.do_work_common(&self.buffer, worker, mmtk);
        trace!("PlanScanObjects End");
    }
}
1096
/// Processes root *nodes* (object references rather than slots) in two
/// phases: first trace each root with `R2OPE` (which must not move them),
/// then create scan work with `O2OPE` for the objects that were newly
/// reached. Used for pinning and transitively-pinning roots.
pub(crate) struct ProcessRootNodes<
    VM: VMBinding,
    R2OPE: ProcessEdgesWork<VM = VM>,
    O2OPE: ProcessEdgesWork<VM = VM>,
> {
    phantom: PhantomData<(VM, R2OPE, O2OPE)>,
    // The root objects to trace.
    roots: Vec<ObjectReference>,
    // The bucket stage the follow-up scan work is added to.
    bucket: WorkBucketStage,
}

impl<VM: VMBinding, R2OPE: ProcessEdgesWork<VM = VM>, O2OPE: ProcessEdgesWork<VM = VM>>
    ProcessRootNodes<VM, R2OPE, O2OPE>
{
    /// Create a packet over root `nodes`, with follow-up work in `bucket`.
    pub fn new(nodes: Vec<ObjectReference>, bucket: WorkBucketStage) -> Self {
        Self {
            phantom: PhantomData,
            roots: nodes,
            bucket,
        }
    }
}

impl<VM: VMBinding, R2OPE: ProcessEdgesWork<VM = VM>, O2OPE: ProcessEdgesWork<VM = VM>> GCWork<VM>
    for ProcessRootNodes<VM, R2OPE, O2OPE>
{
    fn do_work(&mut self, worker: &mut GCWorker<VM>, mmtk: &'static MMTK<VM>) {
        trace!("ProcessRootNodes");

        // Record root nodes for a later sanity GC (unless we are in one).
        #[cfg(feature = "sanity")]
        {
            if !mmtk.is_in_sanity() {
                mmtk.sanity_checker
                    .lock()
                    .unwrap()
                    .add_root_nodes(self.roots.clone());
            }
        }

        let num_roots = self.roots.len();

        // Phase 1: trace each root with R2OPE. Root nodes have no slot to
        // update, so the trace must not move them -- asserted below.
        let root_objects_to_scan = {
            let mut process_edges_work =
                R2OPE::new(vec![], true, mmtk, WorkBucketStage::PinningRootsTrace);
            process_edges_work.set_worker(worker);

            for object in self.roots.iter().copied() {
                let new_object = process_edges_work.trace_object(object);
                debug_assert_eq!(
                    object, new_object,
                    "Object moved while tracing root unmovable root object: {} -> {}",
                    object, new_object
                );
            }

            // The objects newly reached by the root trace.
            process_edges_work.nodes.take()
        };

        let num_enqueued_nodes = root_objects_to_scan.len();
        probe!(mmtk, process_root_nodes, num_roots, num_enqueued_nodes);

        // Phase 2: scan the newly reached objects with O2OPE in this
        // packet's bucket.
        if !root_objects_to_scan.is_empty() {
            let mut process_edges_work = O2OPE::new(vec![], true, mmtk, self.bucket);
            process_edges_work.set_worker(worker);
            if let Some(work) = process_edges_work.create_scan_work(root_objects_to_scan) {
                crate::memory_manager::add_work_packet(mmtk, self.bucket, work);
            }
        }

        trace!("ProcessRootNodes End");
    }
}
1198
/// A placeholder `ProcessEdgesWork` for work contexts that do not support a
/// given trace; every method panics if it is ever invoked.
#[derive(Default)]
pub struct UnsupportedProcessEdges<VM: VMBinding> {
    phantom: PhantomData<VM>,
}

impl<VM: VMBinding> Deref for UnsupportedProcessEdges<VM> {
    type Target = ProcessEdgesBase<VM>;
    fn deref(&self) -> &Self::Target {
        panic!("unsupported!")
    }
}

impl<VM: VMBinding> DerefMut for UnsupportedProcessEdges<VM> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        panic!("unsupported!")
    }
}

impl<VM: VMBinding> ProcessEdgesWork for UnsupportedProcessEdges<VM> {
    type VM = VM;

    type ScanObjectsWorkType = ScanObjects<Self>;

    fn new(
        _slots: Vec<SlotOf<Self>>,
        _roots: bool,
        _mmtk: &'static MMTK<Self::VM>,
        _bucket: WorkBucketStage,
    ) -> Self {
        panic!("unsupported!")
    }

    fn trace_object(&mut self, _object: ObjectReference) -> ObjectReference {
        panic!("unsupported!")
    }

    fn create_scan_work(&self, _nodes: Vec<ObjectReference>) -> Option<Self::ScanObjectsWorkType> {
        panic!("unsupported!")
    }
}