1use super::work_bucket::WorkBucketStage;
2use super::*;
3use crate::global_state::GcStatus;
4use crate::plan::ObjectsClosure;
5use crate::plan::VectorObjectQueue;
6use crate::util::*;
7use crate::vm::slot::Slot;
8use crate::vm::*;
9use crate::*;
10use std::marker::PhantomData;
11use std::ops::{Deref, DerefMut};
12
/// The global work packet that kicks off a garbage collection: it determines
/// the collection kind and asks the plan to schedule the rest of the GC work.
pub struct ScheduleCollection;

impl<VM: VMBinding> GCWork<VM> for ScheduleCollection {
    fn do_work(&mut self, worker: &mut GCWorker<VM>, mmtk: &'static MMTK<VM>) {
        // Notify the GC trigger policy that a GC has started.
        mmtk.gc_trigger.policy.on_gc_start(mmtk);

        // Determine the collection kind from whether the last collection was
        // exhaustive and whether the policy still allows the heap to grow.
        let is_emergency = mmtk.state.set_collection_kind(
            mmtk.get_plan().last_collection_was_exhaustive(),
            mmtk.gc_trigger.policy.can_heap_size_grow(),
        );
        if is_emergency {
            mmtk.get_plan().notify_emergency_collection();
        }
        // Move the global status to the prepare phase before scheduling work.
        mmtk.set_gc_status(GcStatus::GcPrepare);

        // The plan decides which work packets make up this collection.
        mmtk.get_plan().schedule_collection(worker.scheduler());
    }
}
35
/// The global preparation work packet of a GC.
pub struct Prepare<C: GCWorkContext> {
    // Raw pointer to the plan. `do_work` casts this to `&mut`, so nothing else
    // may mutate the plan while this packet runs — presumably guaranteed by the
    // GC schedule (TODO confirm).
    pub plan: *const C::PlanType,
}

// SAFETY(review): the raw plan pointer makes this type `!Send` by default.
// Sending it to a GC worker is assumed sound because the plan it points to is
// globally owned and outlives the packet — TODO confirm against the scheduler.
unsafe impl<C: GCWorkContext> Send for Prepare<C> {}

impl<C: GCWorkContext> Prepare<C> {
    /// Create a `Prepare` packet for the given plan.
    pub fn new(plan: *const C::PlanType) -> Self {
        Self { plan }
    }
}
54
impl<C: GCWorkContext> GCWork<C::VM> for Prepare<C> {
    /// Prepare the plan, then fan out per-mutator and per-worker preparation.
    fn do_work(&mut self, worker: &mut GCWorker<C::VM>, mmtk: &'static MMTK<C::VM>) {
        trace!("Prepare Global");
        // SAFETY(review): casts the shared plan pointer to `&mut`; relies on no
        // concurrent access to the plan during this packet — TODO confirm.
        let plan_mut: &mut C::PlanType = unsafe { &mut *(self.plan as *const _ as *mut _) };
        plan_mut.prepare(worker.tls);

        if plan_mut.constraints().needs_prepare_mutator {
            // One PrepareMutator packet per mutator thread.
            let prepare_mutator_packets = <C::VM as VMBinding>::VMActivePlan::mutators()
                .map(|mutator| Box::new(PrepareMutator::<C::VM>::new(mutator)) as _)
                .collect::<Vec<_>>();
            debug_assert_eq!(
                prepare_mutator_packets.len(),
                <C::VM as VMBinding>::VMActivePlan::number_of_mutators()
            );
            mmtk.scheduler.work_buckets[WorkBucketStage::Prepare].bulk_add(prepare_mutator_packets);
        }

        // Each worker prepares its own collector context via designated work.
        for w in &mmtk.scheduler.worker_group.workers_shared {
            let result = w.designated_work.push(Box::new(PrepareCollector));
            debug_assert!(result.is_ok());
        }
    }
}
80
/// The mutator preparation work packet: prepares a single mutator.
pub struct PrepareMutator<VM: VMBinding> {
    // The mutator to prepare. `&'static` because mutators outlive the GC.
    pub mutator: &'static mut Mutator<VM>,
}

impl<VM: VMBinding> PrepareMutator<VM> {
    /// Create a packet for the given mutator.
    pub fn new(mutator: &'static mut Mutator<VM>) -> Self {
        Self { mutator }
    }
}

impl<VM: VMBinding> GCWork<VM> for PrepareMutator<VM> {
    fn do_work(&mut self, worker: &mut GCWorker<VM>, _mmtk: &'static MMTK<VM>) {
        trace!("Prepare Mutator");
        self.mutator.prepare(worker.tls);
    }
}
100
/// The collector preparation work packet: prepares a single GC worker's
/// copy context, executed as designated work on every worker.
#[derive(Default)]
pub struct PrepareCollector;

impl<VM: VMBinding> GCWork<VM> for PrepareCollector {
    fn do_work(&mut self, worker: &mut GCWorker<VM>, mmtk: &'static MMTK<VM>) {
        trace!("Prepare Collector");
        worker.get_copy_context_mut().prepare();
        mmtk.get_plan().prepare_worker(worker);
    }
}
112
/// The global release work packet of a GC.
pub struct Release<C: GCWorkContext> {
    // Raw pointer to the plan; cast to `&mut` in `do_work`. Same aliasing
    // assumption as `Prepare` — TODO confirm no concurrent plan access.
    pub plan: *const C::PlanType,
}

impl<C: GCWorkContext> Release<C> {
    /// Create a `Release` packet for the given plan.
    pub fn new(plan: *const C::PlanType) -> Self {
        Self { plan }
    }
}

// SAFETY(review): see the matching impl for `Prepare` — the raw plan pointer
// is assumed to be safe to use from the executing worker.
unsafe impl<C: GCWorkContext> Send for Release<C> {}

impl<C: GCWorkContext + 'static> GCWork<C::VM> for Release<C> {
    /// Release the plan, then fan out per-mutator and per-worker release.
    fn do_work(&mut self, worker: &mut GCWorker<C::VM>, mmtk: &'static MMTK<C::VM>) {
        trace!("Release Global");

        // Notify the GC trigger policy before releasing any memory.
        mmtk.gc_trigger.policy.on_gc_release(mmtk);
        // SAFETY(review): same unsafe &mut cast as in `Prepare::do_work`.
        let plan_mut: &mut C::PlanType = unsafe { &mut *(self.plan as *const _ as *mut _) };
        plan_mut.release(worker.tls);

        // One ReleaseMutator packet per mutator thread.
        let release_mutator_packets = <C::VM as VMBinding>::VMActivePlan::mutators()
            .map(|mutator| Box::new(ReleaseMutator::<C::VM>::new(mutator)) as _)
            .collect::<Vec<_>>();
        debug_assert_eq!(
            release_mutator_packets.len(),
            <C::VM as VMBinding>::VMActivePlan::number_of_mutators()
        );
        mmtk.scheduler.work_buckets[WorkBucketStage::Release].bulk_add(release_mutator_packets);

        // Each worker releases its own collector context via designated work.
        for w in &mmtk.scheduler.worker_group.workers_shared {
            let result = w.designated_work.push(Box::new(ReleaseCollector));
            debug_assert!(result.is_ok());
        }
    }
}
158
/// The mutator release work packet: releases a single mutator.
pub struct ReleaseMutator<VM: VMBinding> {
    // The mutator to release. `&'static` because mutators outlive the GC.
    pub mutator: &'static mut Mutator<VM>,
}

impl<VM: VMBinding> ReleaseMutator<VM> {
    /// Create a packet for the given mutator.
    pub fn new(mutator: &'static mut Mutator<VM>) -> Self {
        Self { mutator }
    }
}

impl<VM: VMBinding> GCWork<VM> for ReleaseMutator<VM> {
    fn do_work(&mut self, worker: &mut GCWorker<VM>, _mmtk: &'static MMTK<VM>) {
        trace!("Release Mutator");
        self.mutator.release(worker.tls);
    }
}
178
/// The collector release work packet: releases a single GC worker's copy
/// context, executed as designated work on every worker.
#[derive(Default)]
pub struct ReleaseCollector;

impl<VM: VMBinding> GCWork<VM> for ReleaseCollector {
    fn do_work(&mut self, worker: &mut GCWorker<VM>, _mmtk: &'static MMTK<VM>) {
        trace!("Release Collector");
        worker.get_copy_context_mut().release();
    }
}
189
/// Stop all mutator threads and (optionally) scan their stacks for roots.
#[derive(Default)]
pub struct StopMutators<C: GCWorkContext> {
    // If true, no root-scanning packets are created after mutators stop.
    skip_roots: bool,
    // If true, each stopped mutator is flushed in the stop callback.
    flush_mutator: bool,
    phantom: PhantomData<C>,
}

impl<C: GCWorkContext> StopMutators<C> {
    /// The default variant: stop mutators and scan their roots.
    pub fn new() -> Self {
        Self {
            skip_roots: false,
            flush_mutator: false,
            phantom: PhantomData,
        }
    }

    /// Stop mutators without scheduling any root scanning; mutators are
    /// flushed instead.
    pub fn new_no_scan_roots() -> Self {
        Self {
            skip_roots: true,
            flush_mutator: true,
            phantom: PhantomData,
        }
    }
}
221
impl<C: GCWorkContext> GCWork<C::VM> for StopMutators<C> {
    fn do_work(&mut self, worker: &mut GCWorker<C::VM>, mmtk: &'static MMTK<C::VM>) {
        trace!("stop_all_mutators start");
        mmtk.state.prepare_for_stack_scanning();
        // Ask the VM to stop the world; the closure runs once per stopped mutator.
        <C::VM as VMBinding>::VMCollection::stop_all_mutators(worker.tls, |mutator| {
            if self.flush_mutator {
                mutator.flush();
            }
            if !self.skip_roots {
                // Scan this mutator's roots in the Prepare bucket.
                mmtk.scheduler.work_buckets[WorkBucketStage::Prepare]
                    .add(ScanMutatorRoots::<C>(mutator));
            }
        });
        trace!("stop_all_mutators end");
        // Both the plan and the scheduler are told the world is stopped.
        mmtk.get_plan().notify_mutators_paused(&mmtk.scheduler);
        mmtk.scheduler.notify_mutators_paused(mmtk);
        if !self.skip_roots {
            // VM-global roots are scanned by a separate packet.
            mmtk.scheduler.work_buckets[WorkBucketStage::Prepare]
                .add(ScanVMSpecificRoots::<C>::new());
        }
    }
}
247
/// An [`ObjectTracer`] backed by a `ProcessEdgesWork` instance: traced objects
/// accumulate in the packet's node buffer and are flushed into scan work
/// packets in `stage`.
pub(crate) struct ProcessEdgesWorkTracer<E: ProcessEdgesWork> {
    // The underlying packet used for `trace_object` and its node buffer.
    process_edges_work: E,
    // Bucket stage that generated scan packets are added to.
    stage: WorkBucketStage,
}

impl<E: ProcessEdgesWork> ObjectTracer for ProcessEdgesWorkTracer<E> {
    /// Trace one object, flushing the node buffer whenever it fills up so it
    /// never grows without bound.
    fn trace_object(&mut self, object: ObjectReference) -> ObjectReference {
        let result = self.process_edges_work.trace_object(object);
        self.flush_if_full();
        result
    }
}

impl<E: ProcessEdgesWork> ProcessEdgesWorkTracer<E> {
    fn flush_if_full(&mut self) {
        if self.process_edges_work.nodes.is_full() {
            self.flush();
        }
    }

    /// Flush any remaining buffered nodes; used when the tracer is retired.
    pub fn flush_if_not_empty(&mut self) {
        if !self.process_edges_work.nodes.is_empty() {
            self.flush();
        }
    }

    // Precondition: the node buffer is non-empty (asserted below).
    fn flush(&mut self) {
        let next_nodes = self.process_edges_work.pop_nodes();
        assert!(!next_nodes.is_empty());
        if let Some(work_packet) = self.process_edges_work.create_scan_work(next_nodes) {
            let worker = self.process_edges_work.worker();
            worker.scheduler().work_buckets[self.stage].add(work_packet);
        }
    }
}
287
/// Factory that creates [`ProcessEdgesWorkTracer`] instances on demand for a
/// given bucket stage (see `with_tracer`).
pub(crate) struct ProcessEdgesWorkTracerContext<E: ProcessEdgesWork> {
    // Stage that tracers created by this context will flush scan work into.
    stage: WorkBucketStage,
    phantom_data: PhantomData<E>,
}
296
297impl<E: ProcessEdgesWork> Clone for ProcessEdgesWorkTracerContext<E> {
298 fn clone(&self) -> Self {
299 Self { ..*self }
300 }
301}
302
impl<E: ProcessEdgesWork> ObjectTracerContext<E::VM> for ProcessEdgesWorkTracerContext<E> {
    type TracerType = ProcessEdgesWorkTracer<E>;

    /// Build a temporary tracer around a fresh `ProcessEdgesWork`, run `func`
    /// with it, then flush any nodes the tracer buffered.
    fn with_tracer<R, F>(&self, worker: &mut GCWorker<E::VM>, func: F) -> R
    where
        F: FnOnce(&mut Self::TracerType) -> R,
    {
        let mmtk = worker.mmtk;

        // The packet is created with no slots (and roots=false): it is used
        // only for `trace_object` and its node buffer.
        let mut process_edges_work = E::new(vec![], false, mmtk, self.stage);
        process_edges_work.set_worker(worker);

        let mut tracer = ProcessEdgesWorkTracer {
            process_edges_work,
            stage: self.stage,
        };

        let result = func(&mut tracer);

        // Don't lose nodes that were buffered but never auto-flushed.
        tracer.flush_if_not_empty();

        result
    }
}
333
/// Let the VM binding process its weak references, giving it a tracer that
/// traces (and possibly retains) objects in the `VMRefClosure` stage.
pub struct VMProcessWeakRefs<E: ProcessEdgesWork> {
    phantom_data: PhantomData<E>,
}

impl<E: ProcessEdgesWork> VMProcessWeakRefs<E> {
    pub fn new() -> Self {
        Self {
            phantom_data: PhantomData,
        }
    }
}

impl<E: ProcessEdgesWork> GCWork<E::VM> for VMProcessWeakRefs<E> {
    fn do_work(&mut self, worker: &mut GCWorker<E::VM>, _mmtk: &'static MMTK<E::VM>) {
        trace!("VMProcessWeakRefs");

        let stage = WorkBucketStage::VMRefClosure;

        let need_to_repeat = {
            let tracer_factory = ProcessEdgesWorkTracerContext::<E> {
                stage,
                phantom_data: PhantomData,
            };
            <E::VM as VMBinding>::VMScanning::process_weak_refs(worker, tracer_factory)
        };

        if need_to_repeat {
            // The binding asked for another round: install a fresh copy of this
            // packet as the bucket's sentinel, so it runs again after the
            // current transitive closure in this stage is exhausted.
            let new_self = Box::new(Self::new());

            worker.scheduler().work_buckets[stage].set_sentinel(new_self);
        }
    }
}
376
/// Let the VM binding forward its weak references, giving it a tracer that
/// operates in the `VMRefForwarding` stage.
pub struct VMForwardWeakRefs<E: ProcessEdgesWork> {
    phantom_data: PhantomData<E>,
}

impl<E: ProcessEdgesWork> VMForwardWeakRefs<E> {
    pub fn new() -> Self {
        Self {
            phantom_data: PhantomData,
        }
    }
}

impl<E: ProcessEdgesWork> GCWork<E::VM> for VMForwardWeakRefs<E> {
    fn do_work(&mut self, worker: &mut GCWorker<E::VM>, _mmtk: &'static MMTK<E::VM>) {
        trace!("VMForwardWeakRefs");

        let stage = WorkBucketStage::VMRefForwarding;

        let tracer_factory = ProcessEdgesWorkTracerContext::<E> {
            stage,
            phantom_data: PhantomData,
        };
        <E::VM as VMBinding>::VMScanning::forward_weak_refs(worker, tracer_factory)
    }
}
409
/// Notify the VM binding that reference forwarding has finished.
#[derive(Default)]
pub struct VMPostForwarding<VM: VMBinding> {
    phantom_data: PhantomData<VM>,
}

impl<VM: VMBinding> GCWork<VM> for VMPostForwarding<VM> {
    fn do_work(&mut self, worker: &mut GCWorker<VM>, _mmtk: &'static MMTK<VM>) {
        trace!("VMPostForwarding start");
        <VM as VMBinding>::VMCollection::post_forwarding(worker.tls);
        trace!("VMPostForwarding end");
    }
}
428
/// Scan one mutator's stack/thread-local roots. One packet per mutator.
pub struct ScanMutatorRoots<C: GCWorkContext>(pub &'static mut Mutator<C::VM>);

impl<C: GCWorkContext> GCWork<C::VM> for ScanMutatorRoots<C> {
    fn do_work(&mut self, worker: &mut GCWorker<C::VM>, mmtk: &'static MMTK<C::VM>) {
        trace!("ScanMutatorRoots for mutator {:?}", self.0.get_tls());
        let mutators = <C::VM as VMBinding>::VMActivePlan::number_of_mutators();
        // Factory the binding uses to turn discovered roots into work packets.
        let factory = ProcessEdgesWorkRootsWorkFactory::<
            C::VM,
            C::DefaultProcessEdges,
            C::PinningProcessEdges,
        >::new(mmtk);
        <C::VM as VMBinding>::VMScanning::scan_roots_in_mutator_thread(
            worker.tls,
            // SAFETY(review): reborrows the mutator as a second `&mut` for the
            // duration of the call; `self.0` is not used until after it returns.
            unsafe { &mut *(self.0 as *mut _) },
            factory,
        );
        self.0.flush();

        // The last packet to finish flips the status to GcProper and tells the
        // binding the initial thread scan is complete.
        if mmtk.state.inform_stack_scanned(mutators) {
            <C::VM as VMBinding>::VMScanning::notify_initial_thread_scan_complete(
                false, worker.tls,
            );
            mmtk.set_gc_status(GcStatus::GcProper);
        }
    }
}
455
/// Scan VM-global (non-mutator) roots via the binding.
#[derive(Default)]
pub struct ScanVMSpecificRoots<C: GCWorkContext>(PhantomData<C>);

impl<C: GCWorkContext> ScanVMSpecificRoots<C> {
    pub fn new() -> Self {
        Self(PhantomData)
    }
}
464
465impl<C: GCWorkContext> GCWork<C::VM> for ScanVMSpecificRoots<C> {
466 fn do_work(&mut self, worker: &mut GCWorker<C::VM>, mmtk: &'static MMTK<C::VM>) {
467 trace!("ScanStaticRoots");
468 let factory = ProcessEdgesWorkRootsWorkFactory::<
469 C::VM,
470 C::DefaultProcessEdges,
471 C::PinningProcessEdges,
472 >::new(mmtk);
473 <C::VM as VMBinding>::VMScanning::scan_vm_specific_roots(worker.tls, factory);
474 }
475}
476
/// Shared state for all `ProcessEdgesWork` implementations: the slot batch,
/// the node buffer, and the executing worker.
pub struct ProcessEdgesBase<VM: VMBinding> {
    // The batch of slots this packet will process.
    pub slots: Vec<VM::VMSlot>,
    // Objects enqueued by `trace_object`, later turned into scan work.
    pub nodes: VectorObjectQueue,
    mmtk: &'static MMTK<VM>,
    // The worker executing this packet; null until `set_worker` is called.
    worker: *mut GCWorker<VM>,
    // Whether `slots` are root slots.
    pub roots: bool,
    // The bucket stage that derived work is added to.
    pub bucket: WorkBucketStage,
}

// SAFETY(review): `!Send` comes from the raw `worker` pointer, which is only
// set (by `set_worker`) and used on the worker that executes the packet —
// TODO confirm.
unsafe impl<VM: VMBinding> Send for ProcessEdgesBase<VM> {}
489
impl<VM: VMBinding> ProcessEdgesBase<VM> {
    /// Create the base state for a slot batch.
    ///
    /// `roots` marks the slots as root slots; `bucket` is the stage derived
    /// work will be added to.
    pub fn new(
        slots: Vec<VM::VMSlot>,
        roots: bool,
        mmtk: &'static MMTK<VM>,
        bucket: WorkBucketStage,
    ) -> Self {
        // Under extreme assertions, record every slot so duplicates across
        // packets can be detected.
        #[cfg(feature = "extreme_assertions")]
        if crate::util::slot_logger::should_check_duplicate_slots(mmtk.get_plan()) {
            for slot in &slots {
                mmtk.slot_logger.log_slot(*slot);
            }
        }
        Self {
            slots,
            nodes: VectorObjectQueue::new(),
            mmtk,
            worker: std::ptr::null_mut(),
            roots,
            bucket,
        }
    }
    /// Record the executing worker. Must be called before `worker()`.
    pub fn set_worker(&mut self, worker: &mut GCWorker<VM>) {
        self.worker = worker;
    }

    /// The executing worker.
    ///
    /// SAFETY(review): dereferences the raw pointer stored by `set_worker` and
    /// hands out `&'static mut`; sound only while that worker is alive and no
    /// other reference to it exists — TODO confirm callers uphold this.
    pub fn worker(&self) -> &'static mut GCWorker<VM> {
        unsafe { &mut *self.worker }
    }

    /// The MMTk instance this packet belongs to.
    pub fn mmtk(&self) -> &'static MMTK<VM> {
        self.mmtk
    }

    /// The current plan, as a trait object.
    pub fn plan(&self) -> &'static dyn Plan<VM = VM> {
        self.mmtk.get_plan()
    }

    /// Take all buffered nodes, leaving the buffer empty.
    pub fn pop_nodes(&mut self) -> Vec<ObjectReference> {
        self.nodes.take()
    }

    /// Whether this packet processes root slots.
    pub fn is_roots(&self) -> bool {
        self.roots
    }
}
540
/// Shorthand for the slot type of a `ProcessEdgesWork` implementation.
pub type SlotOf<E> = <<E as ProcessEdgesWork>::VM as VMBinding>::VMSlot;
543
544pub trait ProcessEdgesWork:
567 Send + 'static + Sized + DerefMut + Deref<Target = ProcessEdgesBase<Self::VM>>
568{
569 type VM: VMBinding;
571
572 type ScanObjectsWorkType: ScanObjectsWork<Self::VM>;
574
575 const CAPACITY: usize = EDGES_WORK_BUFFER_SIZE;
582 const OVERWRITE_REFERENCE: bool = true;
584 const SCAN_OBJECTS_IMMEDIATELY: bool = true;
587
588 fn new(
596 slots: Vec<SlotOf<Self>>,
597 roots: bool,
598 mmtk: &'static MMTK<Self::VM>,
599 bucket: WorkBucketStage,
600 ) -> Self;
601
602 fn trace_object(&mut self, object: ObjectReference) -> ObjectReference;
607
608 #[cfg(feature = "sanity")]
611 fn cache_roots_for_sanity_gc(&mut self) {
612 assert!(self.roots);
613 self.mmtk()
614 .sanity_checker
615 .lock()
616 .unwrap()
617 .add_root_slots(self.slots.clone());
618 }
619
620 fn start_or_dispatch_scan_work(&mut self, mut work_packet: impl GCWork<Self::VM>) {
623 if Self::SCAN_OBJECTS_IMMEDIATELY {
624 work_packet.do_work(self.worker(), self.mmtk);
630 } else {
631 debug_assert!(self.bucket != WorkBucketStage::Unconstrained);
632 self.mmtk.scheduler.work_buckets[self.bucket].add(work_packet);
633 }
634 }
635
636 fn create_scan_work(&self, nodes: Vec<ObjectReference>) -> Option<Self::ScanObjectsWorkType>;
647
648 fn flush(&mut self) {
651 let nodes = self.pop_nodes();
652 if !nodes.is_empty() {
653 if let Some(work_packet) = self.create_scan_work(nodes.clone()) {
654 self.start_or_dispatch_scan_work(work_packet);
655 }
656 }
657 }
658
659 fn process_slot(&mut self, slot: SlotOf<Self>) {
662 let Some(object) = slot.load() else {
663 return;
665 };
666 let new_object = self.trace_object(object);
667 if Self::OVERWRITE_REFERENCE && new_object != object {
668 slot.store(new_object);
669 }
670 }
671
672 fn process_slots(&mut self) {
674 probe!(mmtk, process_slots, self.slots.len(), self.is_roots());
675 for i in 0..self.slots.len() {
676 self.process_slot(self.slots[i])
677 }
678 }
679}
680
/// Every `ProcessEdgesWork` is itself a `GCWork` packet: bind the worker,
/// process the slots, flush any enqueued nodes.
impl<E: ProcessEdgesWork> GCWork<E::VM> for E {
    fn do_work(&mut self, worker: &mut GCWorker<E::VM>, _mmtk: &'static MMTK<E::VM>) {
        self.set_worker(worker);
        self.process_slots();
        if !self.nodes.is_empty() {
            self.flush();
        }
        // Root packets are remembered so a sanity GC can re-trace them
        // (unless we are already inside the sanity GC).
        #[cfg(feature = "sanity")]
        if self.roots && !_mmtk.is_in_sanity() {
            self.cache_roots_for_sanity_gc();
        }
        trace!("ProcessEdgesWork End");
    }
}
695
/// A `ProcessEdgesWork` that dispatches `trace_object` through the space
/// function table (SFT), so it needs no knowledge of the concrete plan.
#[allow(dead_code)]
pub struct SFTProcessEdges<VM: VMBinding> {
    pub base: ProcessEdgesBase<VM>,
}

impl<VM: VMBinding> ProcessEdgesWork for SFTProcessEdges<VM> {
    type VM = VM;
    type ScanObjectsWorkType = ScanObjects<Self>;

    fn new(
        slots: Vec<SlotOf<Self>>,
        roots: bool,
        mmtk: &'static MMTK<VM>,
        bucket: WorkBucketStage,
    ) -> Self {
        let base = ProcessEdgesBase::new(slots, roots, mmtk, bucket);
        Self { base }
    }

    fn trace_object(&mut self, object: ObjectReference) -> ObjectReference {
        use crate::policy::sft::GCWorkerMutRef;

        let worker = GCWorkerMutRef::new(self.worker());

        // SAFETY(review): unchecked SFT lookup — assumes `object` points into
        // an address range registered in the SFT map (TODO confirm callers
        // only pass valid heap references).
        let sft = unsafe { crate::mmtk::SFT_MAP.get_unchecked(object.to_raw_address()) };
        sft.sft_trace_object(&mut self.base.nodes, object, worker)
    }

    fn create_scan_work(&self, nodes: Vec<ObjectReference>) -> Option<ScanObjects<Self>> {
        Some(ScanObjects::<Self>::new(nodes, false, self.bucket))
    }
}
739
/// Factory handed to the VM binding during root scanning. It turns reported
/// roots into work packets: `DPE` processes plain root slots, `PPE` traces
/// (t)pinning root objects.
pub(crate) struct ProcessEdgesWorkRootsWorkFactory<
    VM: VMBinding,
    DPE: ProcessEdgesWork<VM = VM>,
    PPE: ProcessEdgesWork<VM = VM>,
> {
    mmtk: &'static MMTK<VM>,
    phantom: PhantomData<(DPE, PPE)>,
}
751
752impl<VM: VMBinding, DPE: ProcessEdgesWork<VM = VM>, PPE: ProcessEdgesWork<VM = VM>> Clone
753 for ProcessEdgesWorkRootsWorkFactory<VM, DPE, PPE>
754{
755 fn clone(&self) -> Self {
756 Self {
757 mmtk: self.mmtk,
758 phantom: PhantomData,
759 }
760 }
761}
762
/// Kind tag passed as the first argument of the `roots` probe point.
/// `repr(usize)` fixes the discriminant values the probe consumer sees.
#[repr(usize)]
enum RootsKind {
    NORMAL = 0,
    PINNING = 1,
    TPINNING = 2,
}
771
impl<VM: VMBinding, DPE: ProcessEdgesWork<VM = VM>, PPE: ProcessEdgesWork<VM = VM>>
    RootsWorkFactory<VM::VMSlot> for ProcessEdgesWorkRootsWorkFactory<VM, DPE, PPE>
{
    /// Plain root slots: processed with the default edges packet in Closure.
    fn create_process_roots_work(&mut self, slots: Vec<VM::VMSlot>) {
        probe!(mmtk, roots, RootsKind::NORMAL, slots.len());
        crate::memory_manager::add_work_packet(
            self.mmtk,
            WorkBucketStage::Closure,
            DPE::new(slots, true, self.mmtk, WorkBucketStage::Closure),
        );
    }

    /// Pinning roots: the root objects themselves are traced with PPE (and
    /// must not move); their children are then closed over with DPE.
    fn create_process_pinning_roots_work(&mut self, nodes: Vec<ObjectReference>) {
        probe!(mmtk, roots, RootsKind::PINNING, nodes.len());
        crate::memory_manager::add_work_packet(
            self.mmtk,
            WorkBucketStage::PinningRootsTrace,
            ProcessRootNodes::<VM, PPE, DPE>::new(nodes, WorkBucketStage::Closure),
        );
    }

    /// Transitively-pinning roots: both the roots and their transitive closure
    /// are traced with PPE, in the TPinningClosure stage.
    fn create_process_tpinning_roots_work(&mut self, nodes: Vec<ObjectReference>) {
        probe!(mmtk, roots, RootsKind::TPINNING, nodes.len());
        crate::memory_manager::add_work_packet(
            self.mmtk,
            WorkBucketStage::TPinningClosure,
            ProcessRootNodes::<VM, PPE, PPE>::new(nodes, WorkBucketStage::TPinningClosure),
        );
    }
}
812
impl<VM: VMBinding, DPE: ProcessEdgesWork<VM = VM>, PPE: ProcessEdgesWork<VM = VM>>
    ProcessEdgesWorkRootsWorkFactory<VM, DPE, PPE>
{
    /// Create a factory bound to the given MMTk instance.
    fn new(mmtk: &'static MMTK<VM>) -> Self {
        Self {
            mmtk,
            phantom: PhantomData,
        }
    }
}
823
// Deref to the shared base so trait default methods can reach its fields.
impl<VM: VMBinding> Deref for SFTProcessEdges<VM> {
    type Target = ProcessEdgesBase<VM>;
    fn deref(&self) -> &Self::Target {
        &self.base
    }
}

impl<VM: VMBinding> DerefMut for SFTProcessEdges<VM> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.base
    }
}
836
/// Common scanning logic shared by the object-scanning work packets below.
pub trait ScanObjectsWork<VM: VMBinding>: GCWork<VM> + Sized {
    /// The `ProcessEdgesWork` type used to trace edges found while scanning.
    type E: ProcessEdgesWork<VM = VM>;

    /// Hook invoked after each object has been scanned.
    fn post_scan_object(&self, object: ObjectReference);

    /// The bucket stage that derived work is added to.
    fn get_bucket(&self) -> WorkBucketStage;

    /// Scan every object in `buffer` in two passes: objects that support slot
    /// enqueuing are scanned with an `ObjectsClosure`; the rest are deferred
    /// and scanned with `scan_object_and_trace_edges`, which traces each
    /// outgoing edge directly via an object tracer.
    fn do_work_common(
        &self,
        buffer: &[ObjectReference],
        worker: &mut GCWorker<<Self::E as ProcessEdgesWork>::VM>,
        mmtk: &'static MMTK<<Self::E as ProcessEdgesWork>::VM>,
    ) {
        let tls = worker.tls;

        let objects_to_scan = buffer;

        // Objects the binding cannot slot-enqueue; handled in the second pass.
        let mut scan_later = vec![];
        {
            let mut closure = ObjectsClosure::<Self::E>::new(worker, self.get_bucket());

            // Optional live-bytes accounting (the `unlikely` hint marks the
            // option as expected to be off).
            if crate::util::rust_util::unlikely(*mmtk.get_options().count_live_bytes_in_gc) {
                let mut live_bytes_stats = closure.worker.shared.live_bytes_per_space.borrow_mut();
                for object in objects_to_scan.iter().copied() {
                    crate::scheduler::worker::GCWorkerShared::<VM>::increase_live_bytes(
                        &mut live_bytes_stats,
                        object,
                    );
                }
            }

            // First pass: slot-enqueuing scan.
            for object in objects_to_scan.iter().copied() {
                if <VM as VMBinding>::VMScanning::support_slot_enqueuing(tls, object) {
                    trace!("Scan object (slot) {}", object);
                    <VM as VMBinding>::VMScanning::scan_object(tls, object, &mut closure);
                    self.post_scan_object(object);
                } else {
                    scan_later.push(object);
                }
            }
        }

        let total_objects = objects_to_scan.len();
        let scan_and_trace = scan_later.len();
        probe!(mmtk, scan_objects, total_objects, scan_and_trace);

        // Second pass: scan-and-trace for objects that need an object tracer.
        if !scan_later.is_empty() {
            let object_tracer_context = ProcessEdgesWorkTracerContext::<Self::E> {
                stage: self.get_bucket(),
                phantom_data: PhantomData,
            };

            object_tracer_context.with_tracer(worker, |object_tracer| {
                for object in scan_later.iter().copied() {
                    trace!("Scan object (node) {}", object);
                    <VM as VMBinding>::VMScanning::scan_object_and_trace_edges(
                        tls,
                        object,
                        object_tracer,
                    );
                    self.post_scan_object(object);
                }
            });
        }
    }
}
922
/// Scan a buffer of objects, tracing edges with the `Edges` packet type.
/// Plan-agnostic counterpart of [`PlanScanObjects`].
pub struct ScanObjects<Edges: ProcessEdgesWork> {
    buffer: Vec<ObjectReference>,
    // Currently unused; kept for the constructor's signature.
    #[allow(unused)]
    concurrent: bool,
    phantom: PhantomData<Edges>,
    // Stage that derived work is added to.
    bucket: WorkBucketStage,
}

impl<Edges: ProcessEdgesWork> ScanObjects<Edges> {
    /// Create a scan packet for `buffer`, dispatching derived work to `bucket`.
    pub fn new(buffer: Vec<ObjectReference>, concurrent: bool, bucket: WorkBucketStage) -> Self {
        Self {
            buffer,
            concurrent,
            phantom: PhantomData,
            bucket,
        }
    }
}
948
impl<VM: VMBinding, E: ProcessEdgesWork<VM = VM>> ScanObjectsWork<VM> for ScanObjects<E> {
    type E = E;

    fn get_bucket(&self) -> WorkBucketStage {
        self.bucket
    }

    // Deliberate no-op: the plan-agnostic packet has no per-object post-scan
    // action (contrast with `PlanScanObjects`).
    fn post_scan_object(&self, _object: ObjectReference) {
    }
}
960
impl<E: ProcessEdgesWork> GCWork<E::VM> for ScanObjects<E> {
    fn do_work(&mut self, worker: &mut GCWorker<E::VM>, mmtk: &'static MMTK<E::VM>) {
        trace!("ScanObjects");
        // All the logic lives in the shared default implementation.
        self.do_work_common(&self.buffer, worker, mmtk);
        trace!("ScanObjects End");
    }
}
968
969use crate::mmtk::MMTK;
970use crate::plan::Plan;
971use crate::plan::PlanTraceObject;
972use crate::policy::gc_work::TraceKind;
973
/// A `ProcessEdgesWork` that traces objects through the plan's own
/// `trace_object` (statically dispatched on the plan type and `KIND`).
pub struct PlanProcessEdges<
    VM: VMBinding,
    P: Plan<VM = VM> + PlanTraceObject<VM>,
    const KIND: TraceKind,
> {
    // The concrete plan, downcast from the global plan trait object in `new`.
    plan: &'static P,
    base: ProcessEdgesBase<VM>,
}
984
impl<VM: VMBinding, P: PlanTraceObject<VM> + Plan<VM = VM>, const KIND: TraceKind> ProcessEdgesWork
    for PlanProcessEdges<VM, P, KIND>
{
    type VM = VM;
    type ScanObjectsWorkType = PlanScanObjects<Self, P>;

    fn new(
        slots: Vec<SlotOf<Self>>,
        roots: bool,
        mmtk: &'static MMTK<VM>,
        bucket: WorkBucketStage,
    ) -> Self {
        let base = ProcessEdgesBase::new(slots, roots, mmtk, bucket);
        // Panics if the active plan is not of type `P`.
        let plan = base.plan().downcast_ref::<P>().unwrap();
        Self { plan, base }
    }

    fn create_scan_work(&self, nodes: Vec<ObjectReference>) -> Option<Self::ScanObjectsWorkType> {
        Some(PlanScanObjects::<Self, P>::new(
            self.plan,
            nodes,
            false,
            self.bucket,
        ))
    }

    fn trace_object(&mut self, object: ObjectReference) -> ObjectReference {
        let worker = self.worker();
        self.plan
            .trace_object::<VectorObjectQueue, KIND>(&mut self.base.nodes, object, worker)
    }

    /// Overrides the default: whether the slot is written back depends on
    /// whether this plan can move objects under trace kind `KIND`, not on the
    /// `OVERWRITE_REFERENCE` constant.
    fn process_slot(&mut self, slot: SlotOf<Self>) {
        let Some(object) = slot.load() else {
            return;
        };
        let new_object = self.trace_object(object);
        if P::may_move_objects::<KIND>() && new_object != object {
            slot.store(new_object);
        }
    }
}
1029
// Deref to the shared base so trait default methods can reach its fields.
impl<VM: VMBinding, P: PlanTraceObject<VM> + Plan<VM = VM>, const KIND: TraceKind> Deref
    for PlanProcessEdges<VM, P, KIND>
{
    type Target = ProcessEdgesBase<VM>;
    fn deref(&self) -> &Self::Target {
        &self.base
    }
}

impl<VM: VMBinding, P: PlanTraceObject<VM> + Plan<VM = VM>, const KIND: TraceKind> DerefMut
    for PlanProcessEdges<VM, P, KIND>
{
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.base
    }
}
1047
/// Plan-aware object-scanning packet: like [`ScanObjects`], but calls the
/// plan's `post_scan_object` hook for every scanned object.
pub struct PlanScanObjects<E: ProcessEdgesWork, P: Plan<VM = E::VM> + PlanTraceObject<E::VM>> {
    plan: &'static P,
    buffer: Vec<ObjectReference>,
    // Currently unused; kept for the constructor's signature.
    #[allow(dead_code)]
    concurrent: bool,
    phantom: PhantomData<E>,
    // Stage that derived work is added to.
    bucket: WorkBucketStage,
}

impl<E: ProcessEdgesWork, P: Plan<VM = E::VM> + PlanTraceObject<E::VM>> PlanScanObjects<E, P> {
    /// Create a scan packet for `buffer` bound to the given plan.
    pub fn new(
        plan: &'static P,
        buffer: Vec<ObjectReference>,
        concurrent: bool,
        bucket: WorkBucketStage,
    ) -> Self {
        Self {
            plan,
            buffer,
            concurrent,
            phantom: PhantomData,
            bucket,
        }
    }
}
1075
impl<E: ProcessEdgesWork, P: Plan<VM = E::VM> + PlanTraceObject<E::VM>> ScanObjectsWork<E::VM>
    for PlanScanObjects<E, P>
{
    type E = E;

    fn get_bucket(&self) -> WorkBucketStage {
        self.bucket
    }

    // Forward to the plan's per-object hook (contrast with the no-op in
    // `ScanObjects`).
    fn post_scan_object(&self, object: ObjectReference) {
        self.plan.post_scan_object(object);
    }
}

impl<E: ProcessEdgesWork, P: Plan<VM = E::VM> + PlanTraceObject<E::VM>> GCWork<E::VM>
    for PlanScanObjects<E, P>
{
    fn do_work(&mut self, worker: &mut GCWorker<E::VM>, mmtk: &'static MMTK<E::VM>) {
        trace!("PlanScanObjects");
        // All the logic lives in the shared default implementation.
        self.do_work_common(&self.buffer, worker, mmtk);
        trace!("PlanScanObjects End");
    }
}
1099
/// Process a batch of root *objects* (as opposed to root slots): trace the
/// roots with `R2OPE` (root-to-object), then scan the enqueued objects with
/// `O2OPE` (object-to-object) in `bucket`.
pub(crate) struct ProcessRootNodes<
    VM: VMBinding,
    R2OPE: ProcessEdgesWork<VM = VM>,
    O2OPE: ProcessEdgesWork<VM = VM>,
> {
    phantom: PhantomData<(VM, R2OPE, O2OPE)>,
    roots: Vec<ObjectReference>,
    // Stage the follow-up scan work is added to.
    bucket: WorkBucketStage,
}

impl<VM: VMBinding, R2OPE: ProcessEdgesWork<VM = VM>, O2OPE: ProcessEdgesWork<VM = VM>>
    ProcessRootNodes<VM, R2OPE, O2OPE>
{
    /// Create a packet for the given root objects.
    pub fn new(nodes: Vec<ObjectReference>, bucket: WorkBucketStage) -> Self {
        Self {
            phantom: PhantomData,
            roots: nodes,
            bucket,
        }
    }
}
1140
impl<VM: VMBinding, R2OPE: ProcessEdgesWork<VM = VM>, O2OPE: ProcessEdgesWork<VM = VM>> GCWork<VM>
    for ProcessRootNodes<VM, R2OPE, O2OPE>
{
    fn do_work(&mut self, worker: &mut GCWorker<VM>, mmtk: &'static MMTK<VM>) {
        trace!("ProcessRootNodes");

        // Remember the root objects for a later sanity GC (unless we are
        // already inside one).
        #[cfg(feature = "sanity")]
        {
            if !mmtk.is_in_sanity() {
                mmtk.sanity_checker
                    .lock()
                    .unwrap()
                    .add_root_nodes(self.roots.clone());
            }
        }

        let num_roots = self.roots.len();

        // First pass: trace each root with R2OPE. The debug assertion enforces
        // that these roots do not move during tracing.
        let root_objects_to_scan = {
            let mut process_edges_work =
                R2OPE::new(vec![], true, mmtk, WorkBucketStage::PinningRootsTrace);
            process_edges_work.set_worker(worker);

            for object in self.roots.iter().copied() {
                let new_object = process_edges_work.trace_object(object);
                debug_assert_eq!(
                    object, new_object,
                    "Object moved while tracing root unmovable root object: {} -> {}",
                    object, new_object
                );
            }

            // Take the enqueued nodes directly (instead of letting the packet
            // flush them) so they can be handed to O2OPE below.
            process_edges_work.nodes.take()
        };

        let num_enqueued_nodes = root_objects_to_scan.len();
        probe!(mmtk, process_root_nodes, num_roots, num_enqueued_nodes);

        // Second pass: scan the enqueued objects with O2OPE in `self.bucket`.
        if !root_objects_to_scan.is_empty() {
            let mut process_edges_work = O2OPE::new(vec![], true, mmtk, self.bucket);
            process_edges_work.set_worker(worker);
            if let Some(work) = process_edges_work.create_scan_work(root_objects_to_scan) {
                crate::memory_manager::add_work_packet(mmtk, self.bucket, work);
            }
        }

        trace!("ProcessRootNodes End");
    }
}
1201
/// A placeholder `ProcessEdgesWork` for configurations that do not support
/// edge processing: every operation panics with "unsupported!".
#[derive(Default)]
pub struct UnsupportedProcessEdges<VM: VMBinding> {
    phantom: PhantomData<VM>,
}

impl<VM: VMBinding> Deref for UnsupportedProcessEdges<VM> {
    type Target = ProcessEdgesBase<VM>;
    fn deref(&self) -> &Self::Target {
        panic!("unsupported!")
    }
}

impl<VM: VMBinding> DerefMut for UnsupportedProcessEdges<VM> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        panic!("unsupported!")
    }
}

impl<VM: VMBinding> ProcessEdgesWork for UnsupportedProcessEdges<VM> {
    type VM = VM;

    type ScanObjectsWorkType = ScanObjects<Self>;

    /// Panics: this type must never be instantiated as a work packet.
    fn new(
        _slots: Vec<SlotOf<Self>>,
        _roots: bool,
        _mmtk: &'static MMTK<Self::VM>,
        _bucket: WorkBucketStage,
    ) -> Self {
        panic!("unsupported!")
    }

    fn trace_object(&mut self, _object: ObjectReference) -> ObjectReference {
        panic!("unsupported!")
    }

    fn create_scan_work(&self, _nodes: Vec<ObjectReference>) -> Option<Self::ScanObjectsWorkType> {
        panic!("unsupported!")
    }
}