mmtk/scheduler/gc_work.rs

1use super::work_bucket::WorkBucketStage;
2use super::*;
3use crate::global_state::GcStatus;
4use crate::plan::ObjectsClosure;
5use crate::plan::VectorObjectQueue;
6use crate::util::*;
7use crate::vm::slot::Slot;
8use crate::vm::*;
9use crate::*;
10use std::marker::PhantomData;
11use std::ops::{Deref, DerefMut};
12
/// The first work packet of a GC. It determines the collection kind, moves the GC status to
/// `GcPrepare`, and delegates to the plan to schedule the rest of the collection work.
pub struct ScheduleCollection;

impl<VM: VMBinding> GCWork<VM> for ScheduleCollection {
    fn do_work(&mut self, worker: &mut GCWorker<VM>, mmtk: &'static MMTK<VM>) {
        // Tell GC trigger that GC started.
        mmtk.gc_trigger.policy.on_gc_start(mmtk);

        // Determine collection kind, based on whether the last collection was exhaustive and
        // whether the GC trigger still allows the heap to grow.
        let is_emergency = mmtk.state.set_collection_kind(
            mmtk.get_plan().last_collection_was_exhaustive(),
            mmtk.gc_trigger.policy.can_heap_size_grow(),
        );
        if is_emergency {
            // Give the plan a chance to react (e.g. change its collection strategy).
            mmtk.get_plan().notify_emergency_collection();
        }
        // Set to GcPrepare
        mmtk.set_gc_status(GcStatus::GcPrepare);

        // Let the plan to schedule collection work
        mmtk.get_plan().schedule_collection(worker.scheduler());
    }
}
35
/// The global GC Preparation Work
/// This work packet invokes prepare() for the plan (which will invoke prepare() for each space), and
/// pushes work packets for preparing mutators and collectors.
/// We should only have one such work packet per GC, before any actual GC work starts.
/// We assume this work packet is the only running work packet that accesses plan, and there should
/// be no other concurrent work packet that accesses plan (read or write). Otherwise, there may
/// be a race condition.
pub struct Prepare<C: GCWorkContext> {
    /// Raw pointer to the plan. `do_work` casts this to a mutable reference, relying on the
    /// exclusive-access assumption documented above.
    pub plan: *const C::PlanType,
}

// SAFETY: The packet only holds a pointer to the global plan, and (per the type-level doc
// comment) it is assumed to be the sole packet accessing the plan while it runs, so sending
// it to another worker thread is sound.
unsafe impl<C: GCWorkContext> Send for Prepare<C> {}

impl<C: GCWorkContext> Prepare<C> {
    /// Create a `Prepare` packet for the given plan.
    pub fn new(plan: *const C::PlanType) -> Self {
        Self { plan }
    }
}
54
impl<C: GCWorkContext> GCWork<C::VM> for Prepare<C> {
    fn do_work(&mut self, worker: &mut GCWorker<C::VM>, mmtk: &'static MMTK<C::VM>) {
        trace!("Prepare Global");
        // We assume this is the only running work packet that accesses plan at the point of execution
        let plan_mut: &mut C::PlanType = unsafe { &mut *(self.plan as *const _ as *mut _) };
        plan_mut.prepare(worker.tls);

        if plan_mut.constraints().needs_prepare_mutator {
            // One `PrepareMutator` packet per mutator, all added to the Prepare bucket in bulk.
            let prepare_mutator_packets = <C::VM as VMBinding>::VMActivePlan::mutators()
                .map(|mutator| Box::new(PrepareMutator::<C::VM>::new(mutator)) as _)
                .collect::<Vec<_>>();
            // Just in case the VM binding is inconsistent about the number of mutators and the actual mutator list.
            debug_assert_eq!(
                prepare_mutator_packets.len(),
                <C::VM as VMBinding>::VMActivePlan::number_of_mutators()
            );
            mmtk.scheduler.work_buckets[WorkBucketStage::Prepare].bulk_add(prepare_mutator_packets);
        }

        // Push `PrepareCollector` as designated work on every worker (rather than into a
        // shared bucket) so that each worker runs it exactly once.
        for w in &mmtk.scheduler.worker_group.workers_shared {
            let result = w.designated_work.push(Box::new(PrepareCollector));
            debug_assert!(result.is_ok());
        }
    }
}
80
/// The mutator GC Preparation Work
/// Invokes `prepare()` on a single mutator. One such packet is created per mutator by the
/// global [`Prepare`] packet.
pub struct PrepareMutator<VM: VMBinding> {
    // The mutator reference has static lifetime.
    // It is safe because the actual lifetime of this work-packet will not exceed the lifetime of a GC.
    pub mutator: &'static mut Mutator<VM>,
}

impl<VM: VMBinding> PrepareMutator<VM> {
    /// Create a preparation packet for the given mutator.
    pub fn new(mutator: &'static mut Mutator<VM>) -> Self {
        Self { mutator }
    }
}

impl<VM: VMBinding> GCWork<VM> for PrepareMutator<VM> {
    fn do_work(&mut self, worker: &mut GCWorker<VM>, _mmtk: &'static MMTK<VM>) {
        trace!("Prepare Mutator");
        self.mutator.prepare(worker.tls);
    }
}
100
/// The collector GC Preparation Work
/// Runs once on each GC worker as designated work, pushed by the global [`Prepare`] packet.
#[derive(Default)]
pub struct PrepareCollector;

impl<VM: VMBinding> GCWork<VM> for PrepareCollector {
    fn do_work(&mut self, worker: &mut GCWorker<VM>, mmtk: &'static MMTK<VM>) {
        trace!("Prepare Collector");
        // Prepare the worker-local copy context, then let the plan perform any
        // per-worker preparation.
        worker.get_copy_context_mut().prepare();
        mmtk.get_plan().prepare_worker(worker);
    }
}
112
/// The global GC release Work
/// This work packet invokes release() for the plan (which will invoke release() for each space), and
/// pushes work packets for releasing mutators and collectors.
/// We should only have one such work packet per GC, after all actual GC work ends.
/// We assume this work packet is the only running work packet that accesses plan, and there should
/// be no other concurrent work packet that accesses plan (read or write). Otherwise, there may
/// be a race condition.
pub struct Release<C: GCWorkContext> {
    /// Raw pointer to the plan. `do_work` casts this to a mutable reference, relying on the
    /// exclusive-access assumption documented above.
    pub plan: *const C::PlanType,
}

impl<C: GCWorkContext> Release<C> {
    /// Create a `Release` packet for the given plan.
    pub fn new(plan: *const C::PlanType) -> Self {
        Self { plan }
    }
}

// SAFETY: The packet only holds a pointer to the global plan, and (per the type-level doc
// comment) it is assumed to be the sole packet accessing the plan while it runs, so sending
// it to another worker thread is sound.
unsafe impl<C: GCWorkContext> Send for Release<C> {}
131
impl<C: GCWorkContext + 'static> GCWork<C::VM> for Release<C> {
    fn do_work(&mut self, worker: &mut GCWorker<C::VM>, mmtk: &'static MMTK<C::VM>) {
        trace!("Release Global");

        // Tell the GC trigger that we are about to release memory.
        mmtk.gc_trigger.policy.on_gc_release(mmtk);
        // We assume this is the only running work packet that accesses plan at the point of execution

        let plan_mut: &mut C::PlanType = unsafe { &mut *(self.plan as *const _ as *mut _) };
        plan_mut.release(worker.tls);

        // One `ReleaseMutator` packet per mutator, all added to the Release bucket in bulk.
        let release_mutator_packets = <C::VM as VMBinding>::VMActivePlan::mutators()
            .map(|mutator| Box::new(ReleaseMutator::<C::VM>::new(mutator)) as _)
            .collect::<Vec<_>>();
        // Just in case the VM binding is inconsistent about the number of mutators and the actual mutator list.
        debug_assert_eq!(
            release_mutator_packets.len(),
            <C::VM as VMBinding>::VMActivePlan::number_of_mutators()
        );
        mmtk.scheduler.work_buckets[WorkBucketStage::Release].bulk_add(release_mutator_packets);

        // Push `ReleaseCollector` as designated work on every worker so that each worker
        // runs it exactly once.
        for w in &mmtk.scheduler.worker_group.workers_shared {
            let result = w.designated_work.push(Box::new(ReleaseCollector));
            debug_assert!(result.is_ok());
        }
    }
}
158
/// The mutator release Work
/// Invokes `release()` on a single mutator. One such packet is created per mutator by the
/// global [`Release`] packet.
pub struct ReleaseMutator<VM: VMBinding> {
    // The mutator reference has static lifetime.
    // It is safe because the actual lifetime of this work-packet will not exceed the lifetime of a GC.
    pub mutator: &'static mut Mutator<VM>,
}

impl<VM: VMBinding> ReleaseMutator<VM> {
    /// Create a release packet for the given mutator.
    pub fn new(mutator: &'static mut Mutator<VM>) -> Self {
        Self { mutator }
    }
}

impl<VM: VMBinding> GCWork<VM> for ReleaseMutator<VM> {
    fn do_work(&mut self, worker: &mut GCWorker<VM>, _mmtk: &'static MMTK<VM>) {
        trace!("Release Mutator");
        self.mutator.release(worker.tls);
    }
}
178
/// The collector release Work
/// Runs once on each GC worker as designated work, pushed by the global [`Release`] packet.
#[derive(Default)]
pub struct ReleaseCollector;

impl<VM: VMBinding> GCWork<VM> for ReleaseCollector {
    fn do_work(&mut self, worker: &mut GCWorker<VM>, _mmtk: &'static MMTK<VM>) {
        trace!("Release Collector");
        // Release the worker-local copy context.
        worker.get_copy_context_mut().release();
    }
}
189
/// Stop all mutators
///
/// TODO: Smaller work granularity
#[derive(Default)]
pub struct StopMutators<C: GCWorkContext> {
    /// If this is true, we skip creating root-scanning work packets.
    /// By default, this is false.
    skip_roots: bool,
    /// Flush mutators once they are stopped. By default this is false. [`ScanMutatorRoots`] will flush mutators.
    flush_mutator: bool,
    phantom: PhantomData<C>,
}

impl<C: GCWorkContext> StopMutators<C> {
    /// Create a `StopMutators` packet that schedules root-scanning work once mutators stop.
    pub fn new() -> Self {
        Self {
            skip_roots: false,
            flush_mutator: false,
            phantom: PhantomData,
        }
    }

    /// Create a `StopMutators` work packet that does not create any root-scanning work packets, and will simply flush mutators.
    pub fn new_no_scan_roots() -> Self {
        Self {
            skip_roots: true,
            flush_mutator: true,
            phantom: PhantomData,
        }
    }
}
221
impl<C: GCWorkContext> GCWork<C::VM> for StopMutators<C> {
    fn do_work(&mut self, worker: &mut GCWorker<C::VM>, mmtk: &'static MMTK<C::VM>) {
        trace!("stop_all_mutators start");
        // Reset the stack-scanning bookkeeping before any mutator is scanned.
        mmtk.state.prepare_for_stack_scanning();
        // Ask the binding to stop the world; the closure runs once per stopped mutator.
        <C::VM as VMBinding>::VMCollection::stop_all_mutators(worker.tls, |mutator| {
            // TODO: The stack scanning work won't start immediately, as the `Prepare` bucket is not opened yet (the bucket is opened in notify_mutators_paused).
            // Should we push to Unconstrained instead?

            if self.flush_mutator {
                mutator.flush();
            }
            if !self.skip_roots {
                mmtk.scheduler.work_buckets[WorkBucketStage::Prepare]
                    .add(ScanMutatorRoots::<C>(mutator));
            }
        });
        trace!("stop_all_mutators end");
        // Inform the plan and the scheduler that the world is now stopped; the latter opens
        // the Prepare bucket, allowing the packets added above to run.
        mmtk.get_plan().notify_mutators_paused(&mmtk.scheduler);
        mmtk.scheduler.notify_mutators_paused(mmtk);
        if !self.skip_roots {
            mmtk.scheduler.work_buckets[WorkBucketStage::Prepare]
                .add(ScanVMSpecificRoots::<C>::new());
        }
    }
}
247
/// This implements `ObjectTracer` by forwarding the `trace_object` calls to the wrapped
/// `ProcessEdgesWork` instance.
pub(crate) struct ProcessEdgesWorkTracer<E: ProcessEdgesWork> {
    // The wrapped ProcessEdgesWork; used for its `trace_object` method and node buffer.
    process_edges_work: E,
    // The bucket into which follow-up scanning packets are added when the buffer is flushed.
    stage: WorkBucketStage,
}

impl<E: ProcessEdgesWork> ObjectTracer for ProcessEdgesWorkTracer<E> {
    /// Forward the `trace_object` call to the underlying `ProcessEdgesWork`,
    /// and flush as soon as the underlying buffer of `process_edges_work` is full.
    fn trace_object(&mut self, object: ObjectReference) -> ObjectReference {
        let result = self.process_edges_work.trace_object(object);
        self.flush_if_full();
        result
    }
}
264
impl<E: ProcessEdgesWork> ProcessEdgesWorkTracer<E> {
    /// Flush the node buffer if it has reached capacity.
    fn flush_if_full(&mut self) {
        if self.process_edges_work.nodes.is_full() {
            self.flush();
        }
    }

    /// Flush any remaining buffered nodes. Called once tracing is done so that no enqueued
    /// node is left behind.
    pub fn flush_if_not_empty(&mut self) {
        if !self.process_edges_work.nodes.is_empty() {
            self.flush();
        }
    }

    /// Drain the node buffer into a scanning work packet and add it to `self.stage`.
    /// Both call sites guarantee the buffer is non-empty, hence the assertion.
    fn flush(&mut self) {
        let next_nodes = self.process_edges_work.pop_nodes();
        assert!(!next_nodes.is_empty());
        // `create_scan_work` may return `None` when the implementation has already enqueued
        // plan-specific scanning packets itself; in that case there is nothing to add here.
        if let Some(work_packet) = self.process_edges_work.create_scan_work(next_nodes) {
            let worker = self.process_edges_work.worker();
            worker.scheduler().work_buckets[self.stage].add(work_packet);
        }
    }
}
287
/// This type implements `ObjectTracerContext` by creating a temporary `ProcessEdgesWork` during
/// the call to `with_tracer`, making use of its `trace_object` method.  It then creates work
/// packets using the methods of the `ProcessEdgesWork` and add the work packet into the given
/// `stage`.
pub(crate) struct ProcessEdgesWorkTracerContext<E: ProcessEdgesWork> {
    // The bucket into which the tracer's follow-up scanning packets are added.
    stage: WorkBucketStage,
    phantom_data: PhantomData<E>,
}
296
297impl<E: ProcessEdgesWork> Clone for ProcessEdgesWorkTracerContext<E> {
298    fn clone(&self) -> Self {
299        Self { ..*self }
300    }
301}
302
impl<E: ProcessEdgesWork> ObjectTracerContext<E::VM> for ProcessEdgesWorkTracerContext<E> {
    type TracerType = ProcessEdgesWorkTracer<E>;

    fn with_tracer<R, F>(&self, worker: &mut GCWorker<E::VM>, func: F) -> R
    where
        F: FnOnce(&mut Self::TracerType) -> R,
    {
        let mmtk = worker.mmtk;

        // Prepare the underlying ProcessEdgesWork.  It starts with no slots (and is not a
        // root packet); it is used only for its `trace_object` method.
        let mut process_edges_work = E::new(vec![], false, mmtk, self.stage);
        // FIXME: This line allows us to omit the borrowing lifetime of worker.
        // We should refactor ProcessEdgesWork so that it uses `worker` locally, not as a member.
        process_edges_work.set_worker(worker);

        // Create the tracer.
        let mut tracer = ProcessEdgesWorkTracer {
            process_edges_work,
            stage: self.stage,
        };

        // The caller can use the tracer here.
        let result = func(&mut tracer);

        // Flush the queued nodes.
        tracer.flush_if_not_empty();

        result
    }
}
333
/// Delegate to the VM binding for weak reference processing.
///
/// Some VMs (e.g. v8) do not have a Java-like global weak reference storage, and the
/// processing of those weakrefs may be more complex. For such case, we delegate to the
/// VM binding to process weak references.
///
/// NOTE: This will replace `{Soft,Weak,Phantom}RefProcessing` and `Finalization` in the future.
pub struct VMProcessWeakRefs<E: ProcessEdgesWork> {
    phantom_data: PhantomData<E>,
}

impl<E: ProcessEdgesWork> VMProcessWeakRefs<E> {
    /// Create a `VMProcessWeakRefs` packet.
    pub fn new() -> Self {
        Self {
            phantom_data: PhantomData,
        }
    }
}
352
353impl<E: ProcessEdgesWork> GCWork<E::VM> for VMProcessWeakRefs<E> {
354    fn do_work(&mut self, worker: &mut GCWorker<E::VM>, _mmtk: &'static MMTK<E::VM>) {
355        trace!("VMProcessWeakRefs");
356
357        let stage = WorkBucketStage::VMRefClosure;
358
359        let need_to_repeat = {
360            let tracer_factory = ProcessEdgesWorkTracerContext::<E> {
361                stage,
362                phantom_data: PhantomData,
363            };
364            <E::VM as VMBinding>::VMScanning::process_weak_refs(worker, tracer_factory)
365        };
366
367        if need_to_repeat {
368            // Schedule Self as the new sentinel so we'll call `process_weak_refs` again after the
369            // current transitive closure.
370            let new_self = Box::new(Self::new());
371
372            worker.scheduler().work_buckets[stage].set_sentinel(new_self);
373        }
374    }
375}
376
/// Delegate to the VM binding for forwarding weak references.
///
/// Some VMs (e.g. v8) do not have a Java-like global weak reference storage, and the
/// processing of those weakrefs may be more complex. For such case, we delegate to the
/// VM binding to process weak references.
///
/// NOTE: This will replace `RefForwarding` and `ForwardFinalization` in the future.
pub struct VMForwardWeakRefs<E: ProcessEdgesWork> {
    phantom_data: PhantomData<E>,
}

impl<E: ProcessEdgesWork> VMForwardWeakRefs<E> {
    /// Create a `VMForwardWeakRefs` packet.
    pub fn new() -> Self {
        Self {
            phantom_data: PhantomData,
        }
    }
}
395
396impl<E: ProcessEdgesWork> GCWork<E::VM> for VMForwardWeakRefs<E> {
397    fn do_work(&mut self, worker: &mut GCWorker<E::VM>, _mmtk: &'static MMTK<E::VM>) {
398        trace!("VMForwardWeakRefs");
399
400        let stage = WorkBucketStage::VMRefForwarding;
401
402        let tracer_factory = ProcessEdgesWorkTracerContext::<E> {
403            stage,
404            phantom_data: PhantomData,
405        };
406        <E::VM as VMBinding>::VMScanning::forward_weak_refs(worker, tracer_factory)
407    }
408}
409
/// This work packet calls `Collection::post_forwarding`.
///
/// NOTE: This will replace `RefEnqueue` in the future.
///
/// NOTE: Although this work packet runs in parallel with the `Release` work packet, it does not
/// access the `Plan` instance.
#[derive(Default)]
pub struct VMPostForwarding<VM: VMBinding> {
    phantom_data: PhantomData<VM>,
}

impl<VM: VMBinding> GCWork<VM> for VMPostForwarding<VM> {
    fn do_work(&mut self, worker: &mut GCWorker<VM>, _mmtk: &'static MMTK<VM>) {
        trace!("VMPostForwarding start");
        // Delegate entirely to the binding.
        <VM as VMBinding>::VMCollection::post_forwarding(worker.tls);
        trace!("VMPostForwarding end");
    }
}
428
/// Scan the roots of a single mutator. One packet is created per mutator by
/// [`StopMutators`], after the mutator has been stopped.
pub struct ScanMutatorRoots<C: GCWorkContext>(pub &'static mut Mutator<C::VM>);

impl<C: GCWorkContext> GCWork<C::VM> for ScanMutatorRoots<C> {
    fn do_work(&mut self, worker: &mut GCWorker<C::VM>, mmtk: &'static MMTK<C::VM>) {
        trace!("ScanMutatorRoots for mutator {:?}", self.0.get_tls());
        let mutators = <C::VM as VMBinding>::VMActivePlan::number_of_mutators();
        let factory = ProcessEdgesWorkRootsWorkFactory::<
            C::VM,
            C::DefaultProcessEdges,
            C::PinningProcessEdges,
        >::new(mmtk);
        // NOTE(review): the unsafe cast re-borrows the mutator to satisfy the scanning API
        // while `self.0` is still borrowed; this packet appears to hold the only live
        // reference to the mutator at this point — confirm against the binding's contract.
        <C::VM as VMBinding>::VMScanning::scan_roots_in_mutator_thread(
            worker.tls,
            unsafe { &mut *(self.0 as *mut _) },
            factory,
        );
        self.0.flush();

        // The packet that scans the last remaining stack notifies the binding and moves the
        // GC into the GcProper state.
        if mmtk.state.inform_stack_scanned(mutators) {
            <C::VM as VMBinding>::VMScanning::notify_initial_thread_scan_complete(
                false, worker.tls,
            );
            mmtk.set_gc_status(GcStatus::GcProper);
        }
    }
}
455
456#[derive(Default)]
457pub struct ScanVMSpecificRoots<C: GCWorkContext>(PhantomData<C>);
458
459impl<C: GCWorkContext> ScanVMSpecificRoots<C> {
460    pub fn new() -> Self {
461        Self(PhantomData)
462    }
463}
464
465impl<C: GCWorkContext> GCWork<C::VM> for ScanVMSpecificRoots<C> {
466    fn do_work(&mut self, worker: &mut GCWorker<C::VM>, mmtk: &'static MMTK<C::VM>) {
467        trace!("ScanStaticRoots");
468        let factory = ProcessEdgesWorkRootsWorkFactory::<
469            C::VM,
470            C::DefaultProcessEdges,
471            C::PinningProcessEdges,
472        >::new(mmtk);
473        <C::VM as VMBinding>::VMScanning::scan_vm_specific_roots(worker.tls, factory);
474    }
475}
476
/// Common state shared by all [`ProcessEdgesWork`] implementations: the slots to process,
/// the queue of newly visited objects, and scheduling context.
pub struct ProcessEdgesBase<VM: VMBinding> {
    /// The slots (edges) this packet will process.
    pub slots: Vec<VM::VMSlot>,
    /// Objects enqueued by `trace_object` on first visit; drained via `pop_nodes`.
    pub nodes: VectorObjectQueue,
    mmtk: &'static MMTK<VM>,
    // Use raw pointer for fast pointer dereferencing, instead of using `Option<&'static mut GCWorker<E::VM>>`.
    // Because a copying gc will dereference this pointer at least once for every object copy.
    worker: *mut GCWorker<VM>,
    /// Whether the slots in this packet are roots.
    pub roots: bool,
    /// The bucket this packet belongs to; derived work is added to the same bucket.
    pub bucket: WorkBucketStage,
}

// SAFETY: The raw `worker` pointer is set (via `set_worker`) to the worker that executes the
// packet; the packet is only accessed by one worker at a time while it runs.
unsafe impl<VM: VMBinding> Send for ProcessEdgesBase<VM> {}
489
impl<VM: VMBinding> ProcessEdgesBase<VM> {
    // Requires an MMTk reference. Each plan-specific type that uses ProcessEdgesBase can get a static plan reference
    // at creation. This avoids overhead for dynamic dispatch or downcasting plan for each object traced.
    pub fn new(
        slots: Vec<VM::VMSlot>,
        roots: bool,
        mmtk: &'static MMTK<VM>,
        bucket: WorkBucketStage,
    ) -> Self {
        // With extreme assertions enabled, catch the same slot being processed twice in one GC.
        #[cfg(feature = "extreme_assertions")]
        if crate::util::slot_logger::should_check_duplicate_slots(mmtk.get_plan()) {
            for slot in &slots {
                // log slot, panic if already logged
                mmtk.slot_logger.log_slot(*slot);
            }
        }
        Self {
            slots,
            nodes: VectorObjectQueue::new(),
            mmtk,
            // Null until `set_worker` is called by the executing worker.
            worker: std::ptr::null_mut(),
            roots,
            bucket,
        }
    }

    /// Record the worker that is executing this packet. Must be called before `worker()`.
    pub fn set_worker(&mut self, worker: &mut GCWorker<VM>) {
        self.worker = worker;
    }

    /// Get the executing worker.
    /// NOTE(review): dereferences the raw pointer stored by `set_worker`; calling this before
    /// `set_worker` would dereference a null pointer.
    pub fn worker(&self) -> &'static mut GCWorker<VM> {
        unsafe { &mut *self.worker }
    }

    /// Get the MMTk instance this packet was created for.
    pub fn mmtk(&self) -> &'static MMTK<VM> {
        self.mmtk
    }

    /// Get the plan, via the stored MMTk reference.
    pub fn plan(&self) -> &'static dyn Plan<VM = VM> {
        self.mmtk.get_plan()
    }

    /// Pop all nodes from nodes, and clear nodes to an empty vector.
    pub fn pop_nodes(&mut self) -> Vec<ObjectReference> {
        self.nodes.take()
    }

    /// Whether this packet processes root slots.
    pub fn is_roots(&self) -> bool {
        self.roots
    }
}
540
/// A short-hand for `<E::VM as VMBinding>::VMSlot`, i.e. the slot type of the VM binding used
/// by a given [`ProcessEdgesWork`] implementation.
pub type SlotOf<E> = <<E as ProcessEdgesWork>::VM as VMBinding>::VMSlot;
543
544/// An abstract trait for work packets that process object graph edges.  Its method
545/// [`ProcessEdgesWork::trace_object`] traces an object and, upon first visit, enqueues it into an
546/// internal queue inside the `ProcessEdgesWork` instance.  Each implementation of this trait
547/// implement `trace_object` differently.  During [`Plan::schedule_collection`], plans select
548/// (usually via `GCWorkContext`) specialized implementations of this trait to be used during each
549/// trace according the nature of each trace, such as whether it is a nursery collection, whether it
550/// is a defrag collection, whether it pins objects, etc.
551///
552/// This trait was originally designed for work packets that process object graph edges represented
553/// as slots.  The constructor [`ProcessEdgesWork::new`] takes a vector of slots, and the created
554/// work packet will trace the objects pointed by the object reference in each slot using the
555/// `trace_object` method, and update the slot if the GC moves the target object when tracing.
556///
557/// This trait can also be used merely as a provider of the `trace_object` method by giving it an
558/// empty vector of slots.  This is useful for node-enqueuing tracing
559/// ([`Scanning::scan_object_and_trace_edges`]) as well as weak reference processing
560/// ([`Scanning::process_weak_refs`] as well as `ReferenceProcessor` and `FinalizableProcessor`).
561/// In those cases, the caller passes the reference to the target object to `trace_object`, an the
562/// caller is responsible for updating the slots according the return value of `trace_object`.
563///
564/// TODO: We should refactor this trait to decouple it from slots. See:
565/// <https://github.com/mmtk/mmtk-core/issues/599>
566pub trait ProcessEdgesWork:
567    Send + 'static + Sized + DerefMut + Deref<Target = ProcessEdgesBase<Self::VM>>
568{
569    /// The associate type for the VM.
570    type VM: VMBinding;
571
572    /// The work packet type for scanning objects when using this ProcessEdgesWork.
573    type ScanObjectsWorkType: ScanObjectsWork<Self::VM>;
574
575    /// The maximum number of slots that should be put to one of this work packets.
576    /// The caller who creates a work packet of this trait should be responsible to
577    /// comply with this capacity.
578    /// Higher capacity means the packet will take longer to finish, and may lead to
579    /// bad load balancing. On the other hand, lower capacity would lead to higher cost
580    /// on scheduling many small work packets. It is important to find a proper capacity.
581    const CAPACITY: usize = EDGES_WORK_BUFFER_SIZE;
582    /// Do we update object reference? This has to be true for a moving GC.
583    const OVERWRITE_REFERENCE: bool = true;
584    /// If true, we do object scanning in this work packet with the same worker without scheduling overhead.
585    /// If false, we will add object scanning work packets to the global queue and allow other workers to work on it.
586    const SCAN_OBJECTS_IMMEDIATELY: bool = true;
587
588    /// Create a [`ProcessEdgesWork`].
589    ///
590    /// Arguments:
591    /// * `slots`: a vector of slots.
592    /// * `roots`: are the objects root reachable objects?
593    /// * `mmtk`: a reference to the MMTK instance.
594    /// * `bucket`: which work bucket this packet belongs to. Further work generated from this packet will also be put to the same bucket.
595    fn new(
596        slots: Vec<SlotOf<Self>>,
597        roots: bool,
598        mmtk: &'static MMTK<Self::VM>,
599        bucket: WorkBucketStage,
600    ) -> Self;
601
602    /// Trace an MMTk object. The implementation should forward this call to the policy-specific
603    /// `trace_object()` methods, depending on which space this object is in.
604    /// If the object is not in any MMTk space, the implementation should forward the call to
605    /// `ActivePlan::vm_trace_object()` to let the binding handle the tracing.
606    fn trace_object(&mut self, object: ObjectReference) -> ObjectReference;
607
608    /// If the work includes roots, we will store the roots somewhere so for sanity GC, we can do another
609    /// transitive closure from the roots.
610    #[cfg(feature = "sanity")]
611    fn cache_roots_for_sanity_gc(&mut self) {
612        assert!(self.roots);
613        self.mmtk()
614            .sanity_checker
615            .lock()
616            .unwrap()
617            .add_root_slots(self.slots.clone());
618    }
619
620    /// Start the a scan work packet. If SCAN_OBJECTS_IMMEDIATELY, the work packet will be executed immediately, in this method.
621    /// Otherwise, the work packet will be added the Closure work bucket and will be dispatched later by the scheduler.
622    fn start_or_dispatch_scan_work(&mut self, mut work_packet: impl GCWork<Self::VM>) {
623        if Self::SCAN_OBJECTS_IMMEDIATELY {
624            // We execute this `scan_objects_work` immediately.
625            // This is expected to be a useful optimization because,
626            // say for _pmd_ with 200M heap, we're likely to have 50000~60000 `ScanObjects` work packets
627            // being dispatched (similar amount to `ProcessEdgesWork`).
628            // Executing these work packets now can remarkably reduce the global synchronization time.
629            work_packet.do_work(self.worker(), self.mmtk);
630        } else {
631            debug_assert!(self.bucket != WorkBucketStage::Unconstrained);
632            self.mmtk.scheduler.work_buckets[self.bucket].add(work_packet);
633        }
634    }
635
636    /// Create an object-scanning work packet to be used for this ProcessEdgesWork.
637    ///
638    /// `roots` indicates if we are creating a packet for root scanning.  It is only true when this
639    /// method is called to handle `RootsWorkFactory::create_process_pinning_roots_work`.
640    ///
641    /// It normally returns `Some(work_packet)` and the `work_packet` should be added to the same
642    /// work bucket as `self`.  In some special cases, such as ConcurrentImmix, this function may
643    /// return `None`, which means the function has enqueued plan-specific object scanning work
644    /// packets that defer from `Self::ScanObjectsWorkType`.  In that case, there is no work packets
645    /// for the caller to add.
646    fn create_scan_work(&self, nodes: Vec<ObjectReference>) -> Option<Self::ScanObjectsWorkType>;
647
648    /// Flush the nodes in ProcessEdgesBase, and create a ScanObjects work packet for it. If the node set is empty,
649    /// this method will simply return with no work packet created.
650    fn flush(&mut self) {
651        let nodes = self.pop_nodes();
652        if !nodes.is_empty() {
653            if let Some(work_packet) = self.create_scan_work(nodes.clone()) {
654                self.start_or_dispatch_scan_work(work_packet);
655            }
656        }
657    }
658
659    /// Process a slot, including loading the object reference from the memory slot,
660    /// trace the object and store back the new object reference if necessary.
661    fn process_slot(&mut self, slot: SlotOf<Self>) {
662        let Some(object) = slot.load() else {
663            // Skip slots that are not holding an object reference.
664            return;
665        };
666        let new_object = self.trace_object(object);
667        if Self::OVERWRITE_REFERENCE && new_object != object {
668            slot.store(new_object);
669        }
670    }
671
672    /// Process all the slots in the work packet.
673    fn process_slots(&mut self) {
674        probe!(mmtk, process_slots, self.slots.len(), self.is_roots());
675        for i in 0..self.slots.len() {
676            self.process_slot(self.slots[i])
677        }
678    }
679}
680
/// Blanket implementation: every `ProcessEdgesWork` is itself a work packet that processes
/// its slots and flushes any enqueued nodes.
impl<E: ProcessEdgesWork> GCWork<E::VM> for E {
    fn do_work(&mut self, worker: &mut GCWorker<E::VM>, _mmtk: &'static MMTK<E::VM>) {
        // Record the executing worker so that `self.worker()` is usable during tracing.
        self.set_worker(worker);
        self.process_slots();
        if !self.nodes.is_empty() {
            self.flush();
        }
        // For sanity GC, remember root slots so another transitive closure can start from them,
        // unless we are already running inside the sanity GC itself.
        #[cfg(feature = "sanity")]
        if self.roots && !_mmtk.is_in_sanity() {
            self.cache_roots_for_sanity_gc();
        }
        trace!("ProcessEdgesWork End");
    }
}
695
/// A general implementation of [`ProcessEdgesWork`] using SFT. A plan can always implement their
/// own [`ProcessEdgesWork`] instances. However, most plans can use this work packet for tracing and
/// they do not need to provide a plan-specific trace object work packet. If they choose to use this
/// type, they need to provide a correct implementation for some related methods (such as
/// `Space.set_copy_for_sft_trace()`, `SFT.sft_trace_object()`). Some plans are not using this type,
/// mostly due to more complex tracing. Either it is impossible to use this type, or there is
/// performance overheads for using this general trace type. In such cases, they implement their
/// specific [`ProcessEdgesWork`] instances.
// TODO: This is not used any more. Should we remove it?
#[allow(dead_code)]
pub struct SFTProcessEdges<VM: VMBinding> {
    pub base: ProcessEdgesBase<VM>,
}

impl<VM: VMBinding> ProcessEdgesWork for SFTProcessEdges<VM> {
    type VM = VM;
    type ScanObjectsWorkType = ScanObjects<Self>;

    fn new(
        slots: Vec<SlotOf<Self>>,
        roots: bool,
        mmtk: &'static MMTK<VM>,
        bucket: WorkBucketStage,
    ) -> Self {
        let base = ProcessEdgesBase::new(slots, roots, mmtk, bucket);
        Self { base }
    }

    fn trace_object(&mut self, object: ObjectReference) -> ObjectReference {
        use crate::policy::sft::GCWorkerMutRef;

        // Erase <VM> type parameter
        let worker = GCWorkerMutRef::new(self.worker());

        // Invoke trace object on sft
        // SAFETY-NOTE(review): `get_unchecked` skips the SFT map bounds check; it relies on
        // `object` being a valid reference into mapped address space — confirm invariant.
        let sft = unsafe { crate::mmtk::SFT_MAP.get_unchecked(object.to_raw_address()) };
        sft.sft_trace_object(&mut self.base.nodes, object, worker)
    }

    fn create_scan_work(&self, nodes: Vec<ObjectReference>) -> Option<ScanObjects<Self>> {
        // Scanning work stays in the same bucket as this packet; `false` = not pinning roots.
        Some(ScanObjects::<Self>::new(nodes, false, self.bucket))
    }
}
739
/// An implementation of `RootsWorkFactory` that creates work packets based on `ProcessEdgesWork`
/// for handling roots.  The `DPE` and the `PPE` type parameters correspond to the
/// `DefaultProcessEdge` and the `PinningProcessEdges` type members of the [`GCWorkContext`] trait.
pub(crate) struct ProcessEdgesWorkRootsWorkFactory<
    VM: VMBinding,
    DPE: ProcessEdgesWork<VM = VM>,
    PPE: ProcessEdgesWork<VM = VM>,
> {
    /// MMTk instance used to add the created packets to its scheduler.
    mmtk: &'static MMTK<VM>,
    phantom: PhantomData<(DPE, PPE)>,
}
751
752impl<VM: VMBinding, DPE: ProcessEdgesWork<VM = VM>, PPE: ProcessEdgesWork<VM = VM>> Clone
753    for ProcessEdgesWorkRootsWorkFactory<VM, DPE, PPE>
754{
755    fn clone(&self) -> Self {
756        Self {
757            mmtk: self.mmtk,
758            phantom: PhantomData,
759        }
760    }
761}
762
/// For USDT tracepoints for roots.
/// Keep in sync with `tools/tracing/timeline/visualize.py`.
#[repr(usize)]
enum RootsKind {
    /// Slot-based (normal) roots.
    NORMAL = 0,
    /// Pinning roots, passed as object references.
    PINNING = 1,
    /// Transitively-pinning roots, passed as object references.
    TPINNING = 2,
}
771
772impl<VM: VMBinding, DPE: ProcessEdgesWork<VM = VM>, PPE: ProcessEdgesWork<VM = VM>>
773    RootsWorkFactory<VM::VMSlot> for ProcessEdgesWorkRootsWorkFactory<VM, DPE, PPE>
774{
775    fn create_process_roots_work(&mut self, slots: Vec<VM::VMSlot>) {
776        // Note: We should use the same USDT name "mmtk:roots" for all the three kinds of roots. A
777        // VM binding may not call all of the three methods in this impl. For example, the OpenJDK
778        // binding only calls `create_process_roots_work`, and the Ruby binding only calls
779        // `create_process_pinning_roots_work`. Because `ProcessEdgesWorkRootsWorkFactory<VM, DPE,
780        // PPE>` is a generic type, the Rust compiler emits the function bodies on demand, so the
781        // resulting machine code may not contain all three USDT trace points.  If they have
782        // different names, and our `capture.bt` mentions all of them, `bpftrace` may complain that
783        // it cannot find one or more of those USDT trace points in the binary.
784        probe!(mmtk, roots, RootsKind::NORMAL, slots.len());
785        crate::memory_manager::add_work_packet(
786            self.mmtk,
787            WorkBucketStage::Closure,
788            DPE::new(slots, true, self.mmtk, WorkBucketStage::Closure),
789        );
790    }
791
792    fn create_process_pinning_roots_work(&mut self, nodes: Vec<ObjectReference>) {
793        probe!(mmtk, roots, RootsKind::PINNING, nodes.len());
794        // Will process roots within the PinningRootsTrace bucket
795        // And put work in the Closure bucket
796        crate::memory_manager::add_work_packet(
797            self.mmtk,
798            WorkBucketStage::PinningRootsTrace,
799            ProcessRootNodes::<VM, PPE, DPE>::new(nodes, WorkBucketStage::Closure),
800        );
801    }
802
803    fn create_process_tpinning_roots_work(&mut self, nodes: Vec<ObjectReference>) {
804        probe!(mmtk, roots, RootsKind::TPINNING, nodes.len());
805        crate::memory_manager::add_work_packet(
806            self.mmtk,
807            WorkBucketStage::TPinningClosure,
808            ProcessRootNodes::<VM, PPE, PPE>::new(nodes, WorkBucketStage::TPinningClosure),
809        );
810    }
811}
812
813impl<VM: VMBinding, DPE: ProcessEdgesWork<VM = VM>, PPE: ProcessEdgesWork<VM = VM>>
814    ProcessEdgesWorkRootsWorkFactory<VM, DPE, PPE>
815{
816    fn new(mmtk: &'static MMTK<VM>) -> Self {
817        Self {
818            mmtk,
819            phantom: PhantomData,
820        }
821    }
822}
823
// Delegate to the shared `ProcessEdgesBase` so its fields and methods are directly accessible.
impl<VM: VMBinding> Deref for SFTProcessEdges<VM> {
    type Target = ProcessEdgesBase<VM>;
    fn deref(&self) -> &Self::Target {
        &self.base
    }
}
830
// Mutable counterpart of the `Deref` impl above: expose `ProcessEdgesBase` mutably.
impl<VM: VMBinding> DerefMut for SFTProcessEdges<VM> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.base
    }
}
836
/// Trait for a work packet that scans objects
pub trait ScanObjectsWork<VM: VMBinding>: GCWork<VM> + Sized {
    /// The associated ProcessEdgesWork for processing the outgoing edges of the objects in this
    /// packet.
    type E: ProcessEdgesWork<VM = VM>;

    /// Called after each object is scanned.
    fn post_scan_object(&self, object: ObjectReference);

    /// Return the work bucket for this work packet and its derived work packets.
    fn get_bucket(&self) -> WorkBucketStage;

    /// The common code for ScanObjects and PlanScanObjects.
    ///
    /// Objects that support slot-enqueuing are scanned with an `ObjectsClosure`, which enqueues
    /// their slots.  The rest are collected into `scan_later` and scanned afterwards with
    /// `scan_object_and_trace_edges`, which traces their outgoing edges immediately.
    fn do_work_common(
        &self,
        buffer: &[ObjectReference],
        worker: &mut GCWorker<<Self::E as ProcessEdgesWork>::VM>,
        mmtk: &'static MMTK<<Self::E as ProcessEdgesWork>::VM>,
    ) {
        let tls = worker.tls;

        let objects_to_scan = buffer;

        // Scan the objects in the list that support slot-enqueuing.
        let mut scan_later = vec![];
        {
            let mut closure = ObjectsClosure::<Self::E>::new(worker, self.get_bucket());

            // For any object we need to scan, we count its live bytes.
            // Check the option outside the loop for better performance.
            if crate::util::rust_util::unlikely(*mmtk.get_options().count_live_bytes_in_gc) {
                // Borrow before the loop.
                let mut live_bytes_stats = closure.worker.shared.live_bytes_per_space.borrow_mut();
                for object in objects_to_scan.iter().copied() {
                    crate::scheduler::worker::GCWorkerShared::<VM>::increase_live_bytes(
                        &mut live_bytes_stats,
                        object,
                    );
                }
            }

            for object in objects_to_scan.iter().copied() {
                if <VM as VMBinding>::VMScanning::support_slot_enqueuing(tls, object) {
                    trace!("Scan object (slot) {}", object);
                    // If an object supports slot-enqueuing, we enqueue its slots.
                    <VM as VMBinding>::VMScanning::scan_object(tls, object, &mut closure);
                    self.post_scan_object(object);
                } else {
                    // If an object does not support slot-enqueuing, we have to use
                    // `Scanning::scan_object_and_trace_edges` and offload the job of updating the
                    // reference field to the VM.
                    //
                    // However, at this point, `closure` is borrowing `worker`.
                    // So we postpone the processing of objects that need object enqueuing.
                    scan_later.push(object);
                }
            }
        }
        // `closure` is dropped here, releasing its borrow of `worker`.

        let total_objects = objects_to_scan.len();
        let scan_and_trace = scan_later.len();
        probe!(mmtk, scan_objects, total_objects, scan_and_trace);

        // If any object does not support slot-enqueuing, we process them now.
        if !scan_later.is_empty() {
            let object_tracer_context = ProcessEdgesWorkTracerContext::<Self::E> {
                stage: self.get_bucket(),
                phantom_data: PhantomData,
            };

            object_tracer_context.with_tracer(worker, |object_tracer| {
                // Scan objects and trace their outgoing edges at the same time.
                for object in scan_later.iter().copied() {
                    trace!("Scan object (node) {}", object);
                    <VM as VMBinding>::VMScanning::scan_object_and_trace_edges(
                        tls,
                        object,
                        object_tracer,
                    );
                    self.post_scan_object(object);
                }
            });
        }
    }
}
922
/// Scan objects and enqueue the slots of the objects.  For objects that do not support
/// slot-enqueuing, this work packet also traces their outgoing edges directly.
///
/// This work packet does not execute policy-specific post-scanning hooks
/// (it won't call `post_scan_object()` in [`policy::gc_work::PolicyTraceObject`]).
/// It should be used only for policies that do not perform policy-specific actions when scanning
/// an object.
pub struct ScanObjects<Edges: ProcessEdgesWork> {
    /// The objects to scan.
    buffer: Vec<ObjectReference>,
    /// Currently unused by this packet; presumably marks concurrent scanning — TODO confirm.
    #[allow(unused)]
    concurrent: bool,
    /// Binds the `Edges` type parameter; no value of that type is stored.
    phantom: PhantomData<Edges>,
    /// The bucket this packet and its derived packets belong to.
    bucket: WorkBucketStage,
}
937
938impl<Edges: ProcessEdgesWork> ScanObjects<Edges> {
939    pub fn new(buffer: Vec<ObjectReference>, concurrent: bool, bucket: WorkBucketStage) -> Self {
940        Self {
941            buffer,
942            concurrent,
943            phantom: PhantomData,
944            bucket,
945        }
946    }
947}
948
impl<VM: VMBinding, E: ProcessEdgesWork<VM = VM>> ScanObjectsWork<VM> for ScanObjects<E> {
    type E = E;

    fn get_bucket(&self) -> WorkBucketStage {
        self.bucket
    }

    /// The plain `ScanObjects` packet has no policy-specific post-scan hook.
    fn post_scan_object(&self, _object: ObjectReference) {
        // Do nothing.
    }
}
960
impl<E: ProcessEdgesWork> GCWork<E::VM> for ScanObjects<E> {
    /// Scan every object in `self.buffer` using the common routine in `ScanObjectsWork`.
    fn do_work(&mut self, worker: &mut GCWorker<E::VM>, mmtk: &'static MMTK<E::VM>) {
        trace!("ScanObjects");
        self.do_work_common(&self.buffer, worker, mmtk);
        trace!("ScanObjects End");
    }
}
968
969use crate::mmtk::MMTK;
970use crate::plan::Plan;
971use crate::plan::PlanTraceObject;
972use crate::policy::gc_work::TraceKind;
973
/// This provides an implementation of [`crate::scheduler::gc_work::ProcessEdgesWork`]. A plan that implements
/// `PlanTraceObject` can use this work packet for tracing objects.
pub struct PlanProcessEdges<
    VM: VMBinding,
    P: Plan<VM = VM> + PlanTraceObject<VM>,
    const KIND: TraceKind,
> {
    /// The plan whose `trace_object` is used; downcast from the global plan in `new`.
    plan: &'static P,
    /// Common state shared by all `ProcessEdgesWork` implementations.
    base: ProcessEdgesBase<VM>,
}
984
985impl<VM: VMBinding, P: PlanTraceObject<VM> + Plan<VM = VM>, const KIND: TraceKind> ProcessEdgesWork
986    for PlanProcessEdges<VM, P, KIND>
987{
988    type VM = VM;
989    type ScanObjectsWorkType = PlanScanObjects<Self, P>;
990
991    fn new(
992        slots: Vec<SlotOf<Self>>,
993        roots: bool,
994        mmtk: &'static MMTK<VM>,
995        bucket: WorkBucketStage,
996    ) -> Self {
997        let base = ProcessEdgesBase::new(slots, roots, mmtk, bucket);
998        let plan = base.plan().downcast_ref::<P>().unwrap();
999        Self { plan, base }
1000    }
1001
1002    fn create_scan_work(&self, nodes: Vec<ObjectReference>) -> Option<Self::ScanObjectsWorkType> {
1003        Some(PlanScanObjects::<Self, P>::new(
1004            self.plan,
1005            nodes,
1006            false,
1007            self.bucket,
1008        ))
1009    }
1010
1011    fn trace_object(&mut self, object: ObjectReference) -> ObjectReference {
1012        // We cannot borrow `self` twice in a call, so we extract `worker` as a local variable.
1013        let worker = self.worker();
1014        self.plan
1015            .trace_object::<VectorObjectQueue, KIND>(&mut self.base.nodes, object, worker)
1016    }
1017
1018    fn process_slot(&mut self, slot: SlotOf<Self>) {
1019        let Some(object) = slot.load() else {
1020            // Skip slots that are not holding an object reference.
1021            return;
1022        };
1023        let new_object = self.trace_object(object);
1024        if P::may_move_objects::<KIND>() && new_object != object {
1025            slot.store(new_object);
1026        }
1027    }
1028}
1029
// Impl Deref/DerefMut to ProcessEdgesBase for PlanProcessEdges, so the shared state and
// helper methods of the base are directly accessible on the packet.
impl<VM: VMBinding, P: PlanTraceObject<VM> + Plan<VM = VM>, const KIND: TraceKind> Deref
    for PlanProcessEdges<VM, P, KIND>
{
    type Target = ProcessEdgesBase<VM>;
    fn deref(&self) -> &Self::Target {
        &self.base
    }
}
1039
// Mutable counterpart of the `Deref` impl for `PlanProcessEdges`.
impl<VM: VMBinding, P: PlanTraceObject<VM> + Plan<VM = VM>, const KIND: TraceKind> DerefMut
    for PlanProcessEdges<VM, P, KIND>
{
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.base
    }
}
1047
/// This is an alternative to `ScanObjects` that calls the `post_scan_object` of the policy
/// selected by the plan.  It is applicable to plans that derive `PlanTraceObject`.
pub struct PlanScanObjects<E: ProcessEdgesWork, P: Plan<VM = E::VM> + PlanTraceObject<E::VM>> {
    /// The plan whose policy-specific `post_scan_object` hook is invoked after each scan.
    plan: &'static P,
    /// The objects to scan.
    buffer: Vec<ObjectReference>,
    /// Currently unused by this packet; presumably marks concurrent scanning — TODO confirm.
    #[allow(dead_code)]
    concurrent: bool,
    /// Binds the `E` type parameter; no value of that type is stored.
    phantom: PhantomData<E>,
    /// The bucket this packet and its derived packets belong to.
    bucket: WorkBucketStage,
}
1058
1059impl<E: ProcessEdgesWork, P: Plan<VM = E::VM> + PlanTraceObject<E::VM>> PlanScanObjects<E, P> {
1060    pub fn new(
1061        plan: &'static P,
1062        buffer: Vec<ObjectReference>,
1063        concurrent: bool,
1064        bucket: WorkBucketStage,
1065    ) -> Self {
1066        Self {
1067            plan,
1068            buffer,
1069            concurrent,
1070            phantom: PhantomData,
1071            bucket,
1072        }
1073    }
1074}
1075
impl<E: ProcessEdgesWork, P: Plan<VM = E::VM> + PlanTraceObject<E::VM>> ScanObjectsWork<E::VM>
    for PlanScanObjects<E, P>
{
    type E = E;

    fn get_bucket(&self) -> WorkBucketStage {
        self.bucket
    }

    /// Delegate to the plan's policy-specific `post_scan_object` hook.
    fn post_scan_object(&self, object: ObjectReference) {
        self.plan.post_scan_object(object);
    }
}
1089
impl<E: ProcessEdgesWork, P: Plan<VM = E::VM> + PlanTraceObject<E::VM>> GCWork<E::VM>
    for PlanScanObjects<E, P>
{
    /// Scan every object in `self.buffer` using the common routine in `ScanObjectsWork`.
    fn do_work(&mut self, worker: &mut GCWorker<E::VM>, mmtk: &'static MMTK<E::VM>) {
        trace!("PlanScanObjects");
        self.do_work_common(&self.buffer, worker, mmtk);
        trace!("PlanScanObjects End");
    }
}
1099
/// This work packet processes pinning roots.
///
/// The `roots` member holds a list of `ObjectReference` to objects directly pointed by roots.
/// These objects will be traced using `R2OPE` (Root-to-Object Process Edges).
///
/// After that, it will create work packets for tracing their children.  Those work packets (and
/// the work packets further created by them) will use `O2OPE` (Object-to-Object Process Edges) as
/// their `ProcessEdgesWork` implementations.
///
/// Because `roots` are pinning roots, `R2OPE` must be a `ProcessEdgesWork` that never moves any
/// object.
///
/// The choice of `O2OPE` determines whether the `roots` are transitively pinning or not.
///
/// -   If `O2OPE` is set to a `ProcessEdgesWork` that never moves objects, all descendants of
///     `roots` will not be moved in this GC.  That implements transitive pinning roots.
/// -   If `O2OPE` may move objects, then this `ProcessRootNodes<VM, R2OPE, O2OPE>` work packet
///     will only pin the objects in `roots` (because `R2OPE` must not move objects anyway), but
///     not their descendants.
pub(crate) struct ProcessRootNodes<
    VM: VMBinding,
    R2OPE: ProcessEdgesWork<VM = VM>,
    O2OPE: ProcessEdgesWork<VM = VM>,
> {
    /// Binds the type parameters; no values of those types are stored.
    phantom: PhantomData<(VM, R2OPE, O2OPE)>,
    /// Objects directly pointed to by roots.
    roots: Vec<ObjectReference>,
    /// The bucket that receives the scanning work packets created by this packet.
    bucket: WorkBucketStage,
}
1128
1129impl<VM: VMBinding, R2OPE: ProcessEdgesWork<VM = VM>, O2OPE: ProcessEdgesWork<VM = VM>>
1130    ProcessRootNodes<VM, R2OPE, O2OPE>
1131{
1132    pub fn new(nodes: Vec<ObjectReference>, bucket: WorkBucketStage) -> Self {
1133        Self {
1134            phantom: PhantomData,
1135            roots: nodes,
1136            bucket,
1137        }
1138    }
1139}
1140
1141impl<VM: VMBinding, R2OPE: ProcessEdgesWork<VM = VM>, O2OPE: ProcessEdgesWork<VM = VM>> GCWork<VM>
1142    for ProcessRootNodes<VM, R2OPE, O2OPE>
1143{
1144    fn do_work(&mut self, worker: &mut GCWorker<VM>, mmtk: &'static MMTK<VM>) {
1145        trace!("ProcessRootNodes");
1146
1147        #[cfg(feature = "sanity")]
1148        {
1149            if !mmtk.is_in_sanity() {
1150                mmtk.sanity_checker
1151                    .lock()
1152                    .unwrap()
1153                    .add_root_nodes(self.roots.clone());
1154            }
1155        }
1156
1157        let num_roots = self.roots.len();
1158
1159        // This step conceptually traces the edges from root slots to the objects they point to.
1160        // However, VMs that deliver root objects instead of root slots are incapable of updating
1161        // root slots.  Therefore, we call `trace_object` on those objects, and assert the GC
1162        // doesn't move those objects because we cannot store the updated references back to the
1163        // slots.
1164        //
1165        // The `root_objects_to_scan` variable will hold those root objects which are traced for the
1166        // first time.  We will create a work packet for scanning those roots.
1167        let root_objects_to_scan = {
1168            // We create an instance of E to use its `trace_object` method and its object queue.
1169            let mut process_edges_work =
1170                R2OPE::new(vec![], true, mmtk, WorkBucketStage::PinningRootsTrace);
1171            process_edges_work.set_worker(worker);
1172
1173            for object in self.roots.iter().copied() {
1174                let new_object = process_edges_work.trace_object(object);
1175                debug_assert_eq!(
1176                    object, new_object,
1177                    "Object moved while tracing root unmovable root object: {} -> {}",
1178                    object, new_object
1179                );
1180            }
1181
1182            // This contains root objects that are visited the first time.
1183            // It is sufficient to only scan these objects.
1184            process_edges_work.nodes.take()
1185        };
1186
1187        let num_enqueued_nodes = root_objects_to_scan.len();
1188        probe!(mmtk, process_root_nodes, num_roots, num_enqueued_nodes);
1189
1190        if !root_objects_to_scan.is_empty() {
1191            let mut process_edges_work = O2OPE::new(vec![], true, mmtk, self.bucket);
1192            process_edges_work.set_worker(worker);
1193            if let Some(work) = process_edges_work.create_scan_work(root_objects_to_scan) {
1194                crate::memory_manager::add_work_packet(mmtk, self.bucket, work);
1195            }
1196        }
1197
1198        trace!("ProcessRootNodes End");
1199    }
1200}
1201
/// A `ProcessEdgesWork` type that panics when any of its methods is used.
/// This is currently used for plans that do not support transitive pinning.
#[derive(Default)]
pub struct UnsupportedProcessEdges<VM: VMBinding> {
    /// Binds the `VM` type parameter; no value of that type is stored.
    phantom: PhantomData<VM>,
}
1208
impl<VM: VMBinding> Deref for UnsupportedProcessEdges<VM> {
    type Target = ProcessEdgesBase<VM>;
    // Intentionally panics: this type has no base and must never be used as a work packet.
    fn deref(&self) -> &Self::Target {
        panic!("unsupported!")
    }
}
1215
impl<VM: VMBinding> DerefMut for UnsupportedProcessEdges<VM> {
    // Intentionally panics, matching the `Deref` impl above.
    fn deref_mut(&mut self) -> &mut Self::Target {
        panic!("unsupported!")
    }
}
1221
impl<VM: VMBinding> ProcessEdgesWork for UnsupportedProcessEdges<VM> {
    type VM = VM;

    type ScanObjectsWorkType = ScanObjects<Self>;

    /// Always panics: this type exists only to fill type positions and must not be constructed.
    fn new(
        _slots: Vec<SlotOf<Self>>,
        _roots: bool,
        _mmtk: &'static MMTK<Self::VM>,
        _bucket: WorkBucketStage,
    ) -> Self {
        panic!("unsupported!")
    }

    /// Always panics.
    fn trace_object(&mut self, _object: ObjectReference) -> ObjectReference {
        panic!("unsupported!")
    }

    /// Always panics.
    fn create_scan_work(&self, _nodes: Vec<ObjectReference>) -> Option<Self::ScanObjectsWorkType> {
        panic!("unsupported!")
    }
}