mmtk/scheduler/
gc_work.rs

1use super::work_bucket::WorkBucketStage;
2use super::*;
3use crate::global_state::GcStatus;
4use crate::plan::ObjectsClosure;
5use crate::plan::VectorObjectQueue;
6use crate::util::*;
7use crate::vm::slot::Slot;
8use crate::vm::*;
9use crate::*;
10use std::marker::PhantomData;
11use std::ops::{Deref, DerefMut};
12
13pub struct ScheduleCollection;
14
15impl<VM: VMBinding> GCWork<VM> for ScheduleCollection {
16    fn do_work(&mut self, worker: &mut GCWorker<VM>, mmtk: &'static MMTK<VM>) {
17        // Tell GC trigger that GC started.
18        mmtk.gc_trigger.policy.on_gc_start(mmtk);
19
20        // Determine collection kind
21        let is_emergency = mmtk.state.set_collection_kind(
22            mmtk.get_plan().last_collection_was_exhaustive(),
23            mmtk.gc_trigger.policy.can_heap_size_grow(),
24        );
25        if is_emergency {
26            mmtk.get_plan().notify_emergency_collection();
27        }
28        // Set to GcPrepare
29        mmtk.set_gc_status(GcStatus::GcPrepare);
30
31        // Let the plan to schedule collection work
32        mmtk.get_plan().schedule_collection(worker.scheduler());
33    }
34}
35
36/// The global GC Preparation Work
37/// This work packet invokes prepare() for the plan (which will invoke prepare() for each space), and
38/// pushes work packets for preparing mutators and collectors.
39/// We should only have one such work packet per GC, before any actual GC work starts.
40/// We assume this work packet is the only running work packet that accesses plan, and there should
41/// be no other concurrent work packet that accesses plan (read or write). Otherwise, there may
42/// be a race condition.
43pub struct Prepare<C: GCWorkContext> {
44    pub plan: *const C::PlanType,
45}
46
47unsafe impl<C: GCWorkContext> Send for Prepare<C> {}
48
49impl<C: GCWorkContext> Prepare<C> {
50    pub fn new(plan: *const C::PlanType) -> Self {
51        Self { plan }
52    }
53}
54
impl<C: GCWorkContext> GCWork<C::VM> for Prepare<C> {
    fn do_work(&mut self, worker: &mut GCWorker<C::VM>, mmtk: &'static MMTK<C::VM>) {
        trace!("Prepare Global");
        // We assume this is the only running work packet that accesses plan at the point of execution
        // SAFETY(review): casting the shared plan pointer to `&mut` relies on the
        // single-accessor assumption documented on the `Prepare` type — TODO confirm.
        let plan_mut: &mut C::PlanType = unsafe { &mut *(self.plan as *const _ as *mut _) };
        plan_mut.prepare(worker.tls);

        // Only schedule per-mutator preparation packets if the plan needs them.
        if plan_mut.constraints().needs_prepare_mutator {
            let prepare_mutator_packets = <C::VM as VMBinding>::VMActivePlan::mutators()
                .map(|mutator| Box::new(PrepareMutator::<C::VM>::new(mutator)) as _)
                .collect::<Vec<_>>();
            // Just in case the VM binding is inconsistent about the number of mutators and the actual mutator list.
            debug_assert_eq!(
                prepare_mutator_packets.len(),
                <C::VM as VMBinding>::VMActivePlan::number_of_mutators()
            );
            mmtk.scheduler.work_buckets[WorkBucketStage::Prepare].bulk_add(prepare_mutator_packets);
        }

        // Push one designated `PrepareCollector` packet to each worker so every worker
        // prepares its own copy context.
        for w in &mmtk.scheduler.worker_group.workers_shared {
            let result = w.designated_work.push(Box::new(PrepareCollector));
            debug_assert!(result.is_ok());
        }
    }
}
80
81/// The mutator GC Preparation Work
82pub struct PrepareMutator<VM: VMBinding> {
83    // The mutator reference has static lifetime.
84    // It is safe because the actual lifetime of this work-packet will not exceed the lifetime of a GC.
85    pub mutator: &'static mut Mutator<VM>,
86}
87
88impl<VM: VMBinding> PrepareMutator<VM> {
89    pub fn new(mutator: &'static mut Mutator<VM>) -> Self {
90        Self { mutator }
91    }
92}
93
94impl<VM: VMBinding> GCWork<VM> for PrepareMutator<VM> {
95    fn do_work(&mut self, worker: &mut GCWorker<VM>, _mmtk: &'static MMTK<VM>) {
96        trace!("Prepare Mutator");
97        self.mutator.prepare(worker.tls);
98    }
99}
100
101/// The collector GC Preparation Work
102#[derive(Default)]
103pub struct PrepareCollector;
104
105impl<VM: VMBinding> GCWork<VM> for PrepareCollector {
106    fn do_work(&mut self, worker: &mut GCWorker<VM>, mmtk: &'static MMTK<VM>) {
107        trace!("Prepare Collector");
108        worker.get_copy_context_mut().prepare();
109        mmtk.get_plan().prepare_worker(worker);
110    }
111}
112
113/// The global GC release Work
114/// This work packet invokes release() for the plan (which will invoke release() for each space), and
115/// pushes work packets for releasing mutators and collectors.
116/// We should only have one such work packet per GC, after all actual GC work ends.
117/// We assume this work packet is the only running work packet that accesses plan, and there should
118/// be no other concurrent work packet that accesses plan (read or write). Otherwise, there may
119/// be a race condition.
120pub struct Release<C: GCWorkContext> {
121    pub plan: *const C::PlanType,
122}
123
124impl<C: GCWorkContext> Release<C> {
125    pub fn new(plan: *const C::PlanType) -> Self {
126        Self { plan }
127    }
128}
129
130unsafe impl<C: GCWorkContext> Send for Release<C> {}
131
impl<C: GCWorkContext + 'static> GCWork<C::VM> for Release<C> {
    fn do_work(&mut self, worker: &mut GCWorker<C::VM>, mmtk: &'static MMTK<C::VM>) {
        trace!("Release Global");

        // Tell the GC trigger policy that the release phase has started.
        mmtk.gc_trigger.policy.on_gc_release(mmtk);
        // We assume this is the only running work packet that accesses plan at the point of execution

        // SAFETY(review): casting the shared plan pointer to `&mut` relies on the
        // single-accessor assumption documented on the `Release` type — TODO confirm.
        let plan_mut: &mut C::PlanType = unsafe { &mut *(self.plan as *const _ as *mut _) };
        plan_mut.release(worker.tls);

        // One release packet per mutator.
        let release_mutator_packets = <C::VM as VMBinding>::VMActivePlan::mutators()
            .map(|mutator| Box::new(ReleaseMutator::<C::VM>::new(mutator)) as _)
            .collect::<Vec<_>>();
        // Just in case the VM binding is inconsistent about the number of mutators and the actual mutator list.
        debug_assert_eq!(
            release_mutator_packets.len(),
            <C::VM as VMBinding>::VMActivePlan::number_of_mutators()
        );
        mmtk.scheduler.work_buckets[WorkBucketStage::Release].bulk_add(release_mutator_packets);

        // Push one designated `ReleaseCollector` packet to each worker so every worker
        // releases its own copy context.
        for w in &mmtk.scheduler.worker_group.workers_shared {
            let result = w.designated_work.push(Box::new(ReleaseCollector));
            debug_assert!(result.is_ok());
        }
    }
}
158
159/// The mutator release Work
160pub struct ReleaseMutator<VM: VMBinding> {
161    // The mutator reference has static lifetime.
162    // It is safe because the actual lifetime of this work-packet will not exceed the lifetime of a GC.
163    pub mutator: &'static mut Mutator<VM>,
164}
165
166impl<VM: VMBinding> ReleaseMutator<VM> {
167    pub fn new(mutator: &'static mut Mutator<VM>) -> Self {
168        Self { mutator }
169    }
170}
171
172impl<VM: VMBinding> GCWork<VM> for ReleaseMutator<VM> {
173    fn do_work(&mut self, worker: &mut GCWorker<VM>, _mmtk: &'static MMTK<VM>) {
174        trace!("Release Mutator");
175        self.mutator.release(worker.tls);
176    }
177}
178
179/// The collector release Work
180#[derive(Default)]
181pub struct ReleaseCollector;
182
183impl<VM: VMBinding> GCWork<VM> for ReleaseCollector {
184    fn do_work(&mut self, worker: &mut GCWorker<VM>, _mmtk: &'static MMTK<VM>) {
185        trace!("Release Collector");
186        worker.get_copy_context_mut().release();
187    }
188}
189
190/// Stop all mutators
191///
192/// TODO: Smaller work granularity
193#[derive(Default)]
194pub struct StopMutators<C: GCWorkContext> {
195    /// If this is true, we skip creating [`ScanMutatorRoots`] work packets for mutators.
196    /// By default, this is false.
197    skip_mutator_roots: bool,
198    /// Flush mutators once they are stopped. By default this is false. [`ScanMutatorRoots`] will flush mutators.
199    flush_mutator: bool,
200    phantom: PhantomData<C>,
201}
202
203impl<C: GCWorkContext> StopMutators<C> {
204    pub fn new() -> Self {
205        Self {
206            skip_mutator_roots: false,
207            flush_mutator: false,
208            phantom: PhantomData,
209        }
210    }
211
212    /// Create a `StopMutators` work packet that does not create `ScanMutatorRoots` work packets for mutators, and will simply flush mutators.
213    pub fn new_no_scan_roots() -> Self {
214        Self {
215            skip_mutator_roots: true,
216            flush_mutator: true,
217            phantom: PhantomData,
218        }
219    }
220}
221
impl<C: GCWorkContext> GCWork<C::VM> for StopMutators<C> {
    fn do_work(&mut self, worker: &mut GCWorker<C::VM>, mmtk: &'static MMTK<C::VM>) {
        trace!("stop_all_mutators start");
        // Reset the stack-scanning bookkeeping before any mutator is reported as stopped.
        mmtk.state.prepare_for_stack_scanning();
        // Ask the VM to stop all mutators; the closure is invoked once per stopped mutator.
        <C::VM as VMBinding>::VMCollection::stop_all_mutators(worker.tls, |mutator| {
            // TODO: The stack scanning work won't start immediately, as the `Prepare` bucket is not opened yet (the bucket is opened in notify_mutators_paused).
            // Should we push to Unconstrained instead?

            if self.flush_mutator {
                mutator.flush();
            }
            if !self.skip_mutator_roots {
                mmtk.scheduler.work_buckets[WorkBucketStage::Prepare]
                    .add(ScanMutatorRoots::<C>(mutator));
            }
        });
        trace!("stop_all_mutators end");
        // All mutators are now stopped: let the plan and the scheduler react (the scheduler
        // opens the `Prepare` bucket here, per the TODO above), then schedule VM-specific
        // root scanning.
        mmtk.get_plan().notify_mutators_paused(&mmtk.scheduler);
        mmtk.scheduler.notify_mutators_paused(mmtk);
        mmtk.scheduler.work_buckets[WorkBucketStage::Prepare].add(ScanVMSpecificRoots::<C>::new());
    }
}
244
245/// This implements `ObjectTracer` by forwarding the `trace_object` calls to the wrapped
246/// `ProcessEdgesWork` instance.
247pub(crate) struct ProcessEdgesWorkTracer<E: ProcessEdgesWork> {
248    process_edges_work: E,
249    stage: WorkBucketStage,
250}
251
252impl<E: ProcessEdgesWork> ObjectTracer for ProcessEdgesWorkTracer<E> {
253    /// Forward the `trace_object` call to the underlying `ProcessEdgesWork`,
254    /// and flush as soon as the underlying buffer of `process_edges_work` is full.
255    fn trace_object(&mut self, object: ObjectReference) -> ObjectReference {
256        let result = self.process_edges_work.trace_object(object);
257        self.flush_if_full();
258        result
259    }
260}
261
262impl<E: ProcessEdgesWork> ProcessEdgesWorkTracer<E> {
263    fn flush_if_full(&mut self) {
264        if self.process_edges_work.nodes.is_full() {
265            self.flush();
266        }
267    }
268
269    pub fn flush_if_not_empty(&mut self) {
270        if !self.process_edges_work.nodes.is_empty() {
271            self.flush();
272        }
273    }
274
275    fn flush(&mut self) {
276        let next_nodes = self.process_edges_work.pop_nodes();
277        assert!(!next_nodes.is_empty());
278        if let Some(work_packet) = self.process_edges_work.create_scan_work(next_nodes) {
279            let worker = self.process_edges_work.worker();
280            worker.scheduler().work_buckets[self.stage].add(work_packet);
281        }
282    }
283}
284
285/// This type implements `ObjectTracerContext` by creating a temporary `ProcessEdgesWork` during
286/// the call to `with_tracer`, making use of its `trace_object` method.  It then creates work
287/// packets using the methods of the `ProcessEdgesWork` and add the work packet into the given
288/// `stage`.
289pub(crate) struct ProcessEdgesWorkTracerContext<E: ProcessEdgesWork> {
290    stage: WorkBucketStage,
291    phantom_data: PhantomData<E>,
292}
293
294impl<E: ProcessEdgesWork> Clone for ProcessEdgesWorkTracerContext<E> {
295    fn clone(&self) -> Self {
296        Self { ..*self }
297    }
298}
299
300impl<E: ProcessEdgesWork> ObjectTracerContext<E::VM> for ProcessEdgesWorkTracerContext<E> {
301    type TracerType = ProcessEdgesWorkTracer<E>;
302
303    fn with_tracer<R, F>(&self, worker: &mut GCWorker<E::VM>, func: F) -> R
304    where
305        F: FnOnce(&mut Self::TracerType) -> R,
306    {
307        let mmtk = worker.mmtk;
308
309        // Prepare the underlying ProcessEdgesWork
310        let mut process_edges_work = E::new(vec![], false, mmtk, self.stage);
311        // FIXME: This line allows us to omit the borrowing lifetime of worker.
312        // We should refactor ProcessEdgesWork so that it uses `worker` locally, not as a member.
313        process_edges_work.set_worker(worker);
314
315        // Cretae the tracer.
316        let mut tracer = ProcessEdgesWorkTracer {
317            process_edges_work,
318            stage: self.stage,
319        };
320
321        // The caller can use the tracer here.
322        let result = func(&mut tracer);
323
324        // Flush the queued nodes.
325        tracer.flush_if_not_empty();
326
327        result
328    }
329}
330
331/// Delegate to the VM binding for weak reference processing.
332///
333/// Some VMs (e.g. v8) do not have a Java-like global weak reference storage, and the
334/// processing of those weakrefs may be more complex. For such case, we delegate to the
335/// VM binding to process weak references.
336///
337/// NOTE: This will replace `{Soft,Weak,Phantom}RefProcessing` and `Finalization` in the future.
338pub struct VMProcessWeakRefs<E: ProcessEdgesWork> {
339    phantom_data: PhantomData<E>,
340}
341
342impl<E: ProcessEdgesWork> VMProcessWeakRefs<E> {
343    pub fn new() -> Self {
344        Self {
345            phantom_data: PhantomData,
346        }
347    }
348}
349
350impl<E: ProcessEdgesWork> GCWork<E::VM> for VMProcessWeakRefs<E> {
351    fn do_work(&mut self, worker: &mut GCWorker<E::VM>, _mmtk: &'static MMTK<E::VM>) {
352        trace!("VMProcessWeakRefs");
353
354        let stage = WorkBucketStage::VMRefClosure;
355
356        let need_to_repeat = {
357            let tracer_factory = ProcessEdgesWorkTracerContext::<E> {
358                stage,
359                phantom_data: PhantomData,
360            };
361            <E::VM as VMBinding>::VMScanning::process_weak_refs(worker, tracer_factory)
362        };
363
364        if need_to_repeat {
365            // Schedule Self as the new sentinel so we'll call `process_weak_refs` again after the
366            // current transitive closure.
367            let new_self = Box::new(Self::new());
368
369            worker.scheduler().work_buckets[stage].set_sentinel(new_self);
370        }
371    }
372}
373
374/// Delegate to the VM binding for forwarding weak references.
375///
376/// Some VMs (e.g. v8) do not have a Java-like global weak reference storage, and the
377/// processing of those weakrefs may be more complex. For such case, we delegate to the
378/// VM binding to process weak references.
379///
380/// NOTE: This will replace `RefForwarding` and `ForwardFinalization` in the future.
381pub struct VMForwardWeakRefs<E: ProcessEdgesWork> {
382    phantom_data: PhantomData<E>,
383}
384
385impl<E: ProcessEdgesWork> VMForwardWeakRefs<E> {
386    pub fn new() -> Self {
387        Self {
388            phantom_data: PhantomData,
389        }
390    }
391}
392
393impl<E: ProcessEdgesWork> GCWork<E::VM> for VMForwardWeakRefs<E> {
394    fn do_work(&mut self, worker: &mut GCWorker<E::VM>, _mmtk: &'static MMTK<E::VM>) {
395        trace!("VMForwardWeakRefs");
396
397        let stage = WorkBucketStage::VMRefForwarding;
398
399        let tracer_factory = ProcessEdgesWorkTracerContext::<E> {
400            stage,
401            phantom_data: PhantomData,
402        };
403        <E::VM as VMBinding>::VMScanning::forward_weak_refs(worker, tracer_factory)
404    }
405}
406
407/// This work packet calls `Collection::post_forwarding`.
408///
409/// NOTE: This will replace `RefEnqueue` in the future.
410///
411/// NOTE: Although this work packet runs in parallel with the `Release` work packet, it does not
412/// access the `Plan` instance.
413#[derive(Default)]
414pub struct VMPostForwarding<VM: VMBinding> {
415    phantom_data: PhantomData<VM>,
416}
417
418impl<VM: VMBinding> GCWork<VM> for VMPostForwarding<VM> {
419    fn do_work(&mut self, worker: &mut GCWorker<VM>, _mmtk: &'static MMTK<VM>) {
420        trace!("VMPostForwarding start");
421        <VM as VMBinding>::VMCollection::post_forwarding(worker.tls);
422        trace!("VMPostForwarding end");
423    }
424}
425
/// Scan the roots of one mutator thread, creating further root-processing packets via the
/// roots-work factory.
pub struct ScanMutatorRoots<C: GCWorkContext>(pub &'static mut Mutator<C::VM>);

impl<C: GCWorkContext> GCWork<C::VM> for ScanMutatorRoots<C> {
    fn do_work(&mut self, worker: &mut GCWorker<C::VM>, mmtk: &'static MMTK<C::VM>) {
        trace!("ScanMutatorRoots for mutator {:?}", self.0.get_tls());
        let mutators = <C::VM as VMBinding>::VMActivePlan::number_of_mutators();
        let factory = ProcessEdgesWorkRootsWorkFactory::<
            C::VM,
            C::DefaultProcessEdges,
            C::PinningProcessEdges,
        >::new(mmtk);
        <C::VM as VMBinding>::VMScanning::scan_roots_in_mutator_thread(
            worker.tls,
            // SAFETY(review): re-borrows the mutator to sidestep the `self.0` borrow needed
            // by `flush()` below; assumes the scanning call does not retain the reference
            // past its return — TODO confirm.
            unsafe { &mut *(self.0 as *mut _) },
            factory,
        );
        self.0.flush();

        // When `inform_stack_scanned` reports all `mutators` stacks scanned (inferred from
        // the name — confirm), notify the VM and advance the global GC status.
        if mmtk.state.inform_stack_scanned(mutators) {
            <C::VM as VMBinding>::VMScanning::notify_initial_thread_scan_complete(
                false, worker.tls,
            );
            mmtk.set_gc_status(GcStatus::GcProper);
        }
    }
}
452
453#[derive(Default)]
454pub struct ScanVMSpecificRoots<C: GCWorkContext>(PhantomData<C>);
455
456impl<C: GCWorkContext> ScanVMSpecificRoots<C> {
457    pub fn new() -> Self {
458        Self(PhantomData)
459    }
460}
461
462impl<C: GCWorkContext> GCWork<C::VM> for ScanVMSpecificRoots<C> {
463    fn do_work(&mut self, worker: &mut GCWorker<C::VM>, mmtk: &'static MMTK<C::VM>) {
464        trace!("ScanStaticRoots");
465        let factory = ProcessEdgesWorkRootsWorkFactory::<
466            C::VM,
467            C::DefaultProcessEdges,
468            C::PinningProcessEdges,
469        >::new(mmtk);
470        <C::VM as VMBinding>::VMScanning::scan_vm_specific_roots(worker.tls, factory);
471    }
472}
473
/// Common state shared by all `ProcessEdgesWork` implementations.
pub struct ProcessEdgesBase<VM: VMBinding> {
    /// The slots (edges) this packet will process.
    pub slots: Vec<VM::VMSlot>,
    /// Objects enqueued by `trace_object`, awaiting scanning (see `pop_nodes`/`flush`).
    pub nodes: VectorObjectQueue,
    mmtk: &'static MMTK<VM>,
    // Use raw pointer for fast pointer dereferencing, instead of using `Option<&'static mut GCWorker<E::VM>>`.
    // Because a copying gc will dereference this pointer at least once for every object copy.
    worker: *mut GCWorker<VM>,
    /// Whether the slots in this packet are roots.
    pub roots: bool,
    /// The work bucket this packet (and work derived from it) belongs to.
    pub bucket: WorkBucketStage,
}

// SAFETY(review): contains a raw `GCWorker` pointer (null until `set_worker`); sending is
// sound only if `set_worker` is called on the executing worker before `worker()` is used —
// TODO confirm.
unsafe impl<VM: VMBinding> Send for ProcessEdgesBase<VM> {}
486
impl<VM: VMBinding> ProcessEdgesBase<VM> {
    // Requires an MMTk reference. Each plan-specific type that uses ProcessEdgesBase can get a static plan reference
    // at creation. This avoids overhead for dynamic dispatch or downcasting plan for each object traced.
    /// Create the shared base state for a `ProcessEdgesWork` packet.
    ///
    /// * `slots`: the slots (edges) to process.
    /// * `roots`: whether these slots are roots.
    /// * `mmtk`: the MMTk instance.
    /// * `bucket`: the work bucket this packet belongs to.
    pub fn new(
        slots: Vec<VM::VMSlot>,
        roots: bool,
        mmtk: &'static MMTK<VM>,
        bucket: WorkBucketStage,
    ) -> Self {
        // With the `extreme_assertions` feature, catch the same slot being enqueued twice.
        #[cfg(feature = "extreme_assertions")]
        if crate::util::slot_logger::should_check_duplicate_slots(mmtk.get_plan()) {
            for slot in &slots {
                // log slot, panic if already logged
                mmtk.slot_logger.log_slot(*slot);
            }
        }
        Self {
            slots,
            nodes: VectorObjectQueue::new(),
            mmtk,
            // Set later via `set_worker` when the packet starts executing.
            worker: std::ptr::null_mut(),
            roots,
            bucket,
        }
    }

    /// Record the worker that is executing this packet.
    pub fn set_worker(&mut self, worker: &mut GCWorker<VM>) {
        self.worker = worker;
    }

    /// Get the worker executing this packet.
    ///
    /// SAFETY(review): dereferences the raw worker pointer and fabricates a `'static`
    /// mutable borrow; sound only if `set_worker` was called first and the worker outlives
    /// this packet — TODO confirm.
    pub fn worker(&self) -> &'static mut GCWorker<VM> {
        unsafe { &mut *self.worker }
    }

    /// Get the MMTk instance this packet was created with.
    pub fn mmtk(&self) -> &'static MMTK<VM> {
        self.mmtk
    }

    /// Get the current plan (via the MMTk instance).
    pub fn plan(&self) -> &'static dyn Plan<VM = VM> {
        self.mmtk.get_plan()
    }

    /// Pop all nodes from nodes, and clear nodes to an empty vector.
    pub fn pop_nodes(&mut self) -> Vec<ObjectReference> {
        self.nodes.take()
    }

    /// Whether the slots in this packet are roots.
    pub fn is_roots(&self) -> bool {
        self.roots
    }
}
537
/// A short-hand for `<E::VM as VMBinding>::VMSlot`, i.e. the slot type of the VM that a
/// given `ProcessEdgesWork` implementation operates on.
pub type SlotOf<E> = <<E as ProcessEdgesWork>::VM as VMBinding>::VMSlot;
540
541/// An abstract trait for work packets that process object graph edges.  Its method
542/// [`ProcessEdgesWork::trace_object`] traces an object and, upon first visit, enqueues it into an
543/// internal queue inside the `ProcessEdgesWork` instance.  Each implementation of this trait
544/// implement `trace_object` differently.  During [`Plan::schedule_collection`], plans select
545/// (usually via `GCWorkContext`) specialized implementations of this trait to be used during each
546/// trace according the nature of each trace, such as whether it is a nursery collection, whether it
547/// is a defrag collection, whether it pins objects, etc.
548///
549/// This trait was originally designed for work packets that process object graph edges represented
550/// as slots.  The constructor [`ProcessEdgesWork::new`] takes a vector of slots, and the created
551/// work packet will trace the objects pointed by the object reference in each slot using the
552/// `trace_object` method, and update the slot if the GC moves the target object when tracing.
553///
554/// This trait can also be used merely as a provider of the `trace_object` method by giving it an
555/// empty vector of slots.  This is useful for node-enqueuing tracing
556/// ([`Scanning::scan_object_and_trace_edges`]) as well as weak reference processing
557/// ([`Scanning::process_weak_refs`] as well as `ReferenceProcessor` and `FinalizableProcessor`).
558/// In those cases, the caller passes the reference to the target object to `trace_object`, an the
559/// caller is responsible for updating the slots according the return value of `trace_object`.
560///
561/// TODO: We should refactor this trait to decouple it from slots. See:
562/// <https://github.com/mmtk/mmtk-core/issues/599>
563pub trait ProcessEdgesWork:
564    Send + 'static + Sized + DerefMut + Deref<Target = ProcessEdgesBase<Self::VM>>
565{
566    /// The associate type for the VM.
567    type VM: VMBinding;
568
569    /// The work packet type for scanning objects when using this ProcessEdgesWork.
570    type ScanObjectsWorkType: ScanObjectsWork<Self::VM>;
571
572    /// The maximum number of slots that should be put to one of this work packets.
573    /// The caller who creates a work packet of this trait should be responsible to
574    /// comply with this capacity.
575    /// Higher capacity means the packet will take longer to finish, and may lead to
576    /// bad load balancing. On the other hand, lower capacity would lead to higher cost
577    /// on scheduling many small work packets. It is important to find a proper capacity.
578    const CAPACITY: usize = EDGES_WORK_BUFFER_SIZE;
579    /// Do we update object reference? This has to be true for a moving GC.
580    const OVERWRITE_REFERENCE: bool = true;
581    /// If true, we do object scanning in this work packet with the same worker without scheduling overhead.
582    /// If false, we will add object scanning work packets to the global queue and allow other workers to work on it.
583    const SCAN_OBJECTS_IMMEDIATELY: bool = true;
584
585    /// Create a [`ProcessEdgesWork`].
586    ///
587    /// Arguments:
588    /// * `slots`: a vector of slots.
589    /// * `roots`: are the objects root reachable objects?
590    /// * `mmtk`: a reference to the MMTK instance.
591    /// * `bucket`: which work bucket this packet belongs to. Further work generated from this packet will also be put to the same bucket.
592    fn new(
593        slots: Vec<SlotOf<Self>>,
594        roots: bool,
595        mmtk: &'static MMTK<Self::VM>,
596        bucket: WorkBucketStage,
597    ) -> Self;
598
599    /// Trace an MMTk object. The implementation should forward this call to the policy-specific
600    /// `trace_object()` methods, depending on which space this object is in.
601    /// If the object is not in any MMTk space, the implementation should forward the call to
602    /// `ActivePlan::vm_trace_object()` to let the binding handle the tracing.
603    fn trace_object(&mut self, object: ObjectReference) -> ObjectReference;
604
605    /// If the work includes roots, we will store the roots somewhere so for sanity GC, we can do another
606    /// transitive closure from the roots.
607    #[cfg(feature = "sanity")]
608    fn cache_roots_for_sanity_gc(&mut self) {
609        assert!(self.roots);
610        self.mmtk()
611            .sanity_checker
612            .lock()
613            .unwrap()
614            .add_root_slots(self.slots.clone());
615    }
616
617    /// Start the a scan work packet. If SCAN_OBJECTS_IMMEDIATELY, the work packet will be executed immediately, in this method.
618    /// Otherwise, the work packet will be added the Closure work bucket and will be dispatched later by the scheduler.
619    fn start_or_dispatch_scan_work(&mut self, mut work_packet: impl GCWork<Self::VM>) {
620        if Self::SCAN_OBJECTS_IMMEDIATELY {
621            // We execute this `scan_objects_work` immediately.
622            // This is expected to be a useful optimization because,
623            // say for _pmd_ with 200M heap, we're likely to have 50000~60000 `ScanObjects` work packets
624            // being dispatched (similar amount to `ProcessEdgesWork`).
625            // Executing these work packets now can remarkably reduce the global synchronization time.
626            work_packet.do_work(self.worker(), self.mmtk);
627        } else {
628            debug_assert!(self.bucket != WorkBucketStage::Unconstrained);
629            self.mmtk.scheduler.work_buckets[self.bucket].add(work_packet);
630        }
631    }
632
633    /// Create an object-scanning work packet to be used for this ProcessEdgesWork.
634    ///
635    /// `roots` indicates if we are creating a packet for root scanning.  It is only true when this
636    /// method is called to handle `RootsWorkFactory::create_process_pinning_roots_work`.
637    ///
638    /// It normally returns `Some(work_packet)` and the `work_packet` should be added to the same
639    /// work bucket as `self`.  In some special cases, such as ConcurrentImmix, this function may
640    /// return `None`, which means the function has enqueued plan-specific object scanning work
641    /// packets that defer from `Self::ScanObjectsWorkType`.  In that case, there is no work packets
642    /// for the caller to add.
643    fn create_scan_work(&self, nodes: Vec<ObjectReference>) -> Option<Self::ScanObjectsWorkType>;
644
645    /// Flush the nodes in ProcessEdgesBase, and create a ScanObjects work packet for it. If the node set is empty,
646    /// this method will simply return with no work packet created.
647    fn flush(&mut self) {
648        let nodes = self.pop_nodes();
649        if !nodes.is_empty() {
650            if let Some(work_packet) = self.create_scan_work(nodes.clone()) {
651                self.start_or_dispatch_scan_work(work_packet);
652            }
653        }
654    }
655
656    /// Process a slot, including loading the object reference from the memory slot,
657    /// trace the object and store back the new object reference if necessary.
658    fn process_slot(&mut self, slot: SlotOf<Self>) {
659        let Some(object) = slot.load() else {
660            // Skip slots that are not holding an object reference.
661            return;
662        };
663        let new_object = self.trace_object(object);
664        if Self::OVERWRITE_REFERENCE && new_object != object {
665            slot.store(new_object);
666        }
667    }
668
669    /// Process all the slots in the work packet.
670    fn process_slots(&mut self) {
671        probe!(mmtk, process_slots, self.slots.len(), self.is_roots());
672        for i in 0..self.slots.len() {
673            self.process_slot(self.slots[i])
674        }
675    }
676}
677
// Blanket implementation: every `ProcessEdgesWork` is itself a work packet.
impl<E: ProcessEdgesWork> GCWork<E::VM> for E {
    fn do_work(&mut self, worker: &mut GCWorker<E::VM>, _mmtk: &'static MMTK<E::VM>) {
        // Record the executing worker first; `trace_object`/`flush` rely on it.
        self.set_worker(worker);
        self.process_slots();
        // Flush any nodes queued by tracing so they get scanned.
        if !self.nodes.is_empty() {
            self.flush();
        }
        // With the sanity feature, remember root slots so the sanity GC can re-trace from
        // them later (skipped while already inside a sanity GC).
        #[cfg(feature = "sanity")]
        if self.roots && !_mmtk.is_in_sanity() {
            self.cache_roots_for_sanity_gc();
        }
        trace!("ProcessEdgesWork End");
    }
}
692
/// A general implementation of [`ProcessEdgesWork`] using SFT. A plan can always implement their
/// own [`ProcessEdgesWork`] instances. However, most plans can use this work packet for tracing and
/// they do not need to provide a plan-specific trace object work packet. If they choose to use this
/// type, they need to provide a correct implementation for some related methods (such as
/// `Space.set_copy_for_sft_trace()`, `SFT.sft_trace_object()`). Some plans are not using this type,
/// mostly due to more complex tracing. Either it is impossible to use this type, or there is
/// performance overheads for using this general trace type. In such cases, they implement their
/// specific [`ProcessEdgesWork`] instances.
// TODO: This is not used any more. Should we remove it?
#[allow(dead_code)]
pub struct SFTProcessEdges<VM: VMBinding> {
    /// Shared `ProcessEdgesWork` state (slots, node queue, worker, bucket).
    pub base: ProcessEdgesBase<VM>,
}

impl<VM: VMBinding> ProcessEdgesWork for SFTProcessEdges<VM> {
    type VM = VM;
    type ScanObjectsWorkType = ScanObjects<Self>;

    /// Create an `SFTProcessEdges` packet; simply wraps `ProcessEdgesBase::new`.
    fn new(
        slots: Vec<SlotOf<Self>>,
        roots: bool,
        mmtk: &'static MMTK<VM>,
        bucket: WorkBucketStage,
    ) -> Self {
        let base = ProcessEdgesBase::new(slots, roots, mmtk, bucket);
        Self { base }
    }

    fn trace_object(&mut self, object: ObjectReference) -> ObjectReference {
        use crate::policy::sft::GCWorkerMutRef;

        // Erase <VM> type parameter
        let worker = GCWorkerMutRef::new(self.worker());

        // Invoke trace object on sft
        // SAFETY(review): `get_unchecked` skips the SFT map entry check; assumes `object`
        // lies in an address range covered by the map — TODO confirm callers guarantee this.
        let sft = unsafe { crate::mmtk::SFT_MAP.get_unchecked(object.to_raw_address()) };
        sft.sft_trace_object(&mut self.base.nodes, object, worker)
    }

    fn create_scan_work(&self, nodes: Vec<ObjectReference>) -> Option<ScanObjects<Self>> {
        // Never a root-scanning packet here (`false`); use the same bucket as `self`.
        Some(ScanObjects::<Self>::new(nodes, false, self.bucket))
    }
}
736
737/// An implementation of `RootsWorkFactory` that creates work packets based on `ProcessEdgesWork`
738/// for handling roots.  The `DPE` and the `PPE` type parameters correspond to the
739/// `DefaultProcessEdge` and the `PinningProcessEdges` type members of the [`GCWorkContext`] trait.
740pub(crate) struct ProcessEdgesWorkRootsWorkFactory<
741    VM: VMBinding,
742    DPE: ProcessEdgesWork<VM = VM>,
743    PPE: ProcessEdgesWork<VM = VM>,
744> {
745    mmtk: &'static MMTK<VM>,
746    phantom: PhantomData<(DPE, PPE)>,
747}
748
749impl<VM: VMBinding, DPE: ProcessEdgesWork<VM = VM>, PPE: ProcessEdgesWork<VM = VM>> Clone
750    for ProcessEdgesWorkRootsWorkFactory<VM, DPE, PPE>
751{
752    fn clone(&self) -> Self {
753        Self {
754            mmtk: self.mmtk,
755            phantom: PhantomData,
756        }
757    }
758}
759
/// For USDT tracepoints for roots.
/// Keep in sync with `tools/tracing/timeline/visualize.py`.
#[repr(usize)]
enum RootsKind {
    /// Roots delivered as slots (`create_process_roots_work`).
    NORMAL = 0,
    /// Pinning roots delivered as nodes (`create_process_pinning_roots_work`).
    PINNING = 1,
    /// Transitively-pinning roots (`create_process_tpinning_roots_work`).
    TPINNING = 2,
}
768
impl<VM: VMBinding, DPE: ProcessEdgesWork<VM = VM>, PPE: ProcessEdgesWork<VM = VM>>
    RootsWorkFactory<VM::VMSlot> for ProcessEdgesWorkRootsWorkFactory<VM, DPE, PPE>
{
    /// Create work for ordinary (non-pinning) root slots, processed by the default
    /// `ProcessEdgesWork` (`DPE`) in the `Closure` bucket.
    fn create_process_roots_work(&mut self, slots: Vec<VM::VMSlot>) {
        // Note: We should use the same USDT name "mmtk:roots" for all the three kinds of roots. A
        // VM binding may not call all of the three methods in this impl. For example, the OpenJDK
        // binding only calls `create_process_roots_work`, and the Ruby binding only calls
        // `create_process_pinning_roots_work`. Because `ProcessEdgesWorkRootsWorkFactory<VM, DPE,
        // PPE>` is a generic type, the Rust compiler emits the function bodies on demand, so the
        // resulting machine code may not contain all three USDT trace points.  If they have
        // different names, and our `capture.bt` mentions all of them, `bpftrace` may complain that
        // it cannot find one or more of those USDT trace points in the binary.
        probe!(mmtk, roots, RootsKind::NORMAL, slots.len());
        crate::memory_manager::add_work_packet(
            self.mmtk,
            WorkBucketStage::Closure,
            DPE::new(slots, true, self.mmtk, WorkBucketStage::Closure),
        );
    }

    /// Create work for pinning roots: the roots are traced with the pinning `PPE` (which must
    /// not move them), while their children are traced with the default `DPE` (which may move).
    fn create_process_pinning_roots_work(&mut self, nodes: Vec<ObjectReference>) {
        probe!(mmtk, roots, RootsKind::PINNING, nodes.len());
        // Will process roots within the PinningRootsTrace bucket
        // And put work in the Closure bucket
        crate::memory_manager::add_work_packet(
            self.mmtk,
            WorkBucketStage::PinningRootsTrace,
            ProcessRootNodes::<VM, PPE, DPE>::new(nodes, WorkBucketStage::Closure),
        );
    }

    /// Create work for transitively-pinning roots: both the roots and all their descendants are
    /// traced with the pinning `PPE`, so nothing reachable from these roots is moved.
    fn create_process_tpinning_roots_work(&mut self, nodes: Vec<ObjectReference>) {
        probe!(mmtk, roots, RootsKind::TPINNING, nodes.len());
        crate::memory_manager::add_work_packet(
            self.mmtk,
            WorkBucketStage::TPinningClosure,
            ProcessRootNodes::<VM, PPE, PPE>::new(nodes, WorkBucketStage::TPinningClosure),
        );
    }
}
809
810impl<VM: VMBinding, DPE: ProcessEdgesWork<VM = VM>, PPE: ProcessEdgesWork<VM = VM>>
811    ProcessEdgesWorkRootsWorkFactory<VM, DPE, PPE>
812{
813    fn new(mmtk: &'static MMTK<VM>) -> Self {
814        Self {
815            mmtk,
816            phantom: PhantomData,
817        }
818    }
819}
820
821impl<VM: VMBinding> Deref for SFTProcessEdges<VM> {
822    type Target = ProcessEdgesBase<VM>;
823    fn deref(&self) -> &Self::Target {
824        &self.base
825    }
826}
827
828impl<VM: VMBinding> DerefMut for SFTProcessEdges<VM> {
829    fn deref_mut(&mut self) -> &mut Self::Target {
830        &mut self.base
831    }
832}
833
/// Trait for a work packet that scans objects
pub trait ScanObjectsWork<VM: VMBinding>: GCWork<VM> + Sized {
    /// The associated ProcessEdgesWork for processing the outgoing edges of the objects in this
    /// packet.
    type E: ProcessEdgesWork<VM = VM>;

    /// Called after each object is scanned.
    fn post_scan_object(&self, object: ObjectReference);

    /// Return the work bucket for this work packet and its derived work packets.
    fn get_bucket(&self) -> WorkBucketStage;

    /// The common code for ScanObjects and PlanScanObjects.
    ///
    /// Objects in `buffer` that support slot-enqueuing are scanned immediately via an
    /// `ObjectsClosure`; the rest are collected and traced afterwards with
    /// `scan_object_and_trace_edges`.  Derived work packets go into `self.get_bucket()`.
    fn do_work_common(
        &self,
        buffer: &[ObjectReference],
        worker: &mut GCWorker<<Self::E as ProcessEdgesWork>::VM>,
        mmtk: &'static MMTK<<Self::E as ProcessEdgesWork>::VM>,
    ) {
        let tls = worker.tls;

        let objects_to_scan = buffer;

        // Scan the objects in the list that support slot-enqueuing.
        let mut scan_later = vec![];
        {
            // NOTE: `closure` mutably borrows `worker` until the end of this scope.
            let mut closure = ObjectsClosure::<Self::E>::new(worker, self.get_bucket());

            // For any object we need to scan, we count its live bytes.
            // Check the option outside the loop for better performance.
            if crate::util::rust_util::unlikely(*mmtk.get_options().count_live_bytes_in_gc) {
                // Borrow before the loop.
                let mut live_bytes_stats = closure.worker.shared.live_bytes_per_space.borrow_mut();
                for object in objects_to_scan.iter().copied() {
                    crate::scheduler::worker::GCWorkerShared::<VM>::increase_live_bytes(
                        &mut live_bytes_stats,
                        object,
                    );
                }
            }

            for object in objects_to_scan.iter().copied() {
                if <VM as VMBinding>::VMScanning::support_slot_enqueuing(tls, object) {
                    trace!("Scan object (slot) {}", object);
                    // If an object supports slot-enqueuing, we enqueue its slots.
                    <VM as VMBinding>::VMScanning::scan_object(tls, object, &mut closure);
                    self.post_scan_object(object);
                } else {
                    // If an object does not support slot-enqueuing, we have to use
                    // `Scanning::scan_object_and_trace_edges` and offload the job of updating the
                    // reference field to the VM.
                    //
                    // However, at this point, `closure` is borrowing `worker`.
                    // So we postpone the processing of objects that needs object enqueuing
                    scan_later.push(object);
                }
            }
        }

        // Statistics for the USDT tracepoint: how many objects in total, and how many of them
        // had to fall back to scan-and-trace.
        let total_objects = objects_to_scan.len();
        let scan_and_trace = scan_later.len();
        probe!(mmtk, scan_objects, total_objects, scan_and_trace);

        // If any object does not support slot-enqueuing, we process them now.
        if !scan_later.is_empty() {
            let object_tracer_context = ProcessEdgesWorkTracerContext::<Self::E> {
                stage: self.get_bucket(),
                phantom_data: PhantomData,
            };

            object_tracer_context.with_tracer(worker, |object_tracer| {
                // Scan objects and trace their outgoing edges at the same time.
                for object in scan_later.iter().copied() {
                    trace!("Scan object (node) {}", object);
                    <VM as VMBinding>::VMScanning::scan_object_and_trace_edges(
                        tls,
                        object,
                        object_tracer,
                    );
                    self.post_scan_object(object);
                }
            });
        }
    }
}
919
/// Scan objects and enqueue the slots of the objects.  For objects that do not support
/// slot-enqueuing, this work packet also traces their outgoing edges directly.
///
/// This work packet does not execute policy-specific post-scanning hooks
/// (it won't call `post_scan_object()` in [`policy::gc_work::PolicyTraceObject`]).
/// It should be used only for policies that do not perform policy-specific actions when scanning
/// an object.
pub struct ScanObjects<Edges: ProcessEdgesWork> {
    // The objects to scan.
    buffer: Vec<ObjectReference>,
    #[allow(unused)]
    concurrent: bool,
    // Marker for the `ProcessEdgesWork` type used for the outgoing edges.
    phantom: PhantomData<Edges>,
    // The bucket for this packet and the work packets it creates.
    bucket: WorkBucketStage,
}
934
935impl<Edges: ProcessEdgesWork> ScanObjects<Edges> {
936    pub fn new(buffer: Vec<ObjectReference>, concurrent: bool, bucket: WorkBucketStage) -> Self {
937        Self {
938            buffer,
939            concurrent,
940            phantom: PhantomData,
941            bucket,
942        }
943    }
944}
945
946impl<VM: VMBinding, E: ProcessEdgesWork<VM = VM>> ScanObjectsWork<VM> for ScanObjects<E> {
947    type E = E;
948
949    fn get_bucket(&self) -> WorkBucketStage {
950        self.bucket
951    }
952
953    fn post_scan_object(&self, _object: ObjectReference) {
954        // Do nothing.
955    }
956}
957
958impl<E: ProcessEdgesWork> GCWork<E::VM> for ScanObjects<E> {
959    fn do_work(&mut self, worker: &mut GCWorker<E::VM>, mmtk: &'static MMTK<E::VM>) {
960        trace!("ScanObjects");
961        self.do_work_common(&self.buffer, worker, mmtk);
962        trace!("ScanObjects End");
963    }
964}
965
966use crate::mmtk::MMTK;
967use crate::plan::Plan;
968use crate::plan::PlanTraceObject;
969use crate::policy::gc_work::TraceKind;
970
/// This provides an implementation of [`crate::scheduler::gc_work::ProcessEdgesWork`]. A plan that implements
/// `PlanTraceObject` can use this work packet for tracing objects.
pub struct PlanProcessEdges<
    VM: VMBinding,
    P: Plan<VM = VM> + PlanTraceObject<VM>,
    const KIND: TraceKind,
> {
    // The plan that owns the `trace_object` implementation used by this packet.
    plan: &'static P,
    // Common `ProcessEdgesWork` state (slots, enqueued nodes, worker, bucket, ...).
    base: ProcessEdgesBase<VM>,
}
981
982impl<VM: VMBinding, P: PlanTraceObject<VM> + Plan<VM = VM>, const KIND: TraceKind> ProcessEdgesWork
983    for PlanProcessEdges<VM, P, KIND>
984{
985    type VM = VM;
986    type ScanObjectsWorkType = PlanScanObjects<Self, P>;
987
988    fn new(
989        slots: Vec<SlotOf<Self>>,
990        roots: bool,
991        mmtk: &'static MMTK<VM>,
992        bucket: WorkBucketStage,
993    ) -> Self {
994        let base = ProcessEdgesBase::new(slots, roots, mmtk, bucket);
995        let plan = base.plan().downcast_ref::<P>().unwrap();
996        Self { plan, base }
997    }
998
999    fn create_scan_work(&self, nodes: Vec<ObjectReference>) -> Option<Self::ScanObjectsWorkType> {
1000        Some(PlanScanObjects::<Self, P>::new(
1001            self.plan,
1002            nodes,
1003            false,
1004            self.bucket,
1005        ))
1006    }
1007
1008    fn trace_object(&mut self, object: ObjectReference) -> ObjectReference {
1009        // We cannot borrow `self` twice in a call, so we extract `worker` as a local variable.
1010        let worker = self.worker();
1011        self.plan
1012            .trace_object::<VectorObjectQueue, KIND>(&mut self.base.nodes, object, worker)
1013    }
1014
1015    fn process_slot(&mut self, slot: SlotOf<Self>) {
1016        let Some(object) = slot.load() else {
1017            // Skip slots that are not holding an object reference.
1018            return;
1019        };
1020        let new_object = self.trace_object(object);
1021        if P::may_move_objects::<KIND>() && new_object != object {
1022            slot.store(new_object);
1023        }
1024    }
1025}
1026
1027// Impl Deref/DerefMut to ProcessEdgesBase for PlanProcessEdges
1028impl<VM: VMBinding, P: PlanTraceObject<VM> + Plan<VM = VM>, const KIND: TraceKind> Deref
1029    for PlanProcessEdges<VM, P, KIND>
1030{
1031    type Target = ProcessEdgesBase<VM>;
1032    fn deref(&self) -> &Self::Target {
1033        &self.base
1034    }
1035}
1036
1037impl<VM: VMBinding, P: PlanTraceObject<VM> + Plan<VM = VM>, const KIND: TraceKind> DerefMut
1038    for PlanProcessEdges<VM, P, KIND>
1039{
1040    fn deref_mut(&mut self) -> &mut Self::Target {
1041        &mut self.base
1042    }
1043}
1044
/// This is an alternative to `ScanObjects` that calls the `post_scan_object` of the policy
/// selected by the plan.  It is applicable to plans that derive `PlanTraceObject`.
pub struct PlanScanObjects<E: ProcessEdgesWork, P: Plan<VM = E::VM> + PlanTraceObject<E::VM>> {
    // The plan whose policy-specific `post_scan_object` hook is invoked per object.
    plan: &'static P,
    // The objects to scan.
    buffer: Vec<ObjectReference>,
    #[allow(dead_code)]
    concurrent: bool,
    // Marker for the `ProcessEdgesWork` type used for the outgoing edges.
    phantom: PhantomData<E>,
    // The bucket for this packet and the work packets it creates.
    bucket: WorkBucketStage,
}
1055
1056impl<E: ProcessEdgesWork, P: Plan<VM = E::VM> + PlanTraceObject<E::VM>> PlanScanObjects<E, P> {
1057    pub fn new(
1058        plan: &'static P,
1059        buffer: Vec<ObjectReference>,
1060        concurrent: bool,
1061        bucket: WorkBucketStage,
1062    ) -> Self {
1063        Self {
1064            plan,
1065            buffer,
1066            concurrent,
1067            phantom: PhantomData,
1068            bucket,
1069        }
1070    }
1071}
1072
1073impl<E: ProcessEdgesWork, P: Plan<VM = E::VM> + PlanTraceObject<E::VM>> ScanObjectsWork<E::VM>
1074    for PlanScanObjects<E, P>
1075{
1076    type E = E;
1077
1078    fn get_bucket(&self) -> WorkBucketStage {
1079        self.bucket
1080    }
1081
1082    fn post_scan_object(&self, object: ObjectReference) {
1083        self.plan.post_scan_object(object);
1084    }
1085}
1086
1087impl<E: ProcessEdgesWork, P: Plan<VM = E::VM> + PlanTraceObject<E::VM>> GCWork<E::VM>
1088    for PlanScanObjects<E, P>
1089{
1090    fn do_work(&mut self, worker: &mut GCWorker<E::VM>, mmtk: &'static MMTK<E::VM>) {
1091        trace!("PlanScanObjects");
1092        self.do_work_common(&self.buffer, worker, mmtk);
1093        trace!("PlanScanObjects End");
1094    }
1095}
1096
/// This work packet processes pinning roots.
///
/// The `roots` member holds a list of `ObjectReference` to objects directly pointed by roots.
/// These objects will be traced using `R2OPE` (Root-to-Object Process Edges).
///
/// After that, it will create work packets for tracing their children.  Those work packets (and
/// the work packets further created by them) will use `O2OPE` (Object-to-Object Process Edges) as
/// their `ProcessEdgesWork` implementations.
///
/// Because `roots` are pinning roots, `R2OPE` must be a `ProcessEdgesWork` that never moves any
/// object.
///
/// The choice of `O2OPE` determines whether the `roots` are transitively pinning or not.
///
/// -   If `O2OPE` is set to a `ProcessEdgesWork` that never moves objects, all descendents of
///     `roots` will not be moved in this GC.  That implements transitive pinning roots.
/// -   If `O2OPE` may move objects, then this `ProcessRootNodes<VM, R2OPE, O2OPE>` work packet
///     will only pin the objects in `roots` (because `R2OPE` must not move objects anyway), but
///     not their descendents.
pub(crate) struct ProcessRootNodes<
    VM: VMBinding,
    R2OPE: ProcessEdgesWork<VM = VM>,
    O2OPE: ProcessEdgesWork<VM = VM>,
> {
    // Marker for the otherwise-unused type parameters.
    phantom: PhantomData<(VM, R2OPE, O2OPE)>,
    // Objects directly pointed to by pinning roots.
    roots: Vec<ObjectReference>,
    // The bucket into which the work for tracing the roots' children is placed.
    bucket: WorkBucketStage,
}
1125
1126impl<VM: VMBinding, R2OPE: ProcessEdgesWork<VM = VM>, O2OPE: ProcessEdgesWork<VM = VM>>
1127    ProcessRootNodes<VM, R2OPE, O2OPE>
1128{
1129    pub fn new(nodes: Vec<ObjectReference>, bucket: WorkBucketStage) -> Self {
1130        Self {
1131            phantom: PhantomData,
1132            roots: nodes,
1133            bucket,
1134        }
1135    }
1136}
1137
1138impl<VM: VMBinding, R2OPE: ProcessEdgesWork<VM = VM>, O2OPE: ProcessEdgesWork<VM = VM>> GCWork<VM>
1139    for ProcessRootNodes<VM, R2OPE, O2OPE>
1140{
1141    fn do_work(&mut self, worker: &mut GCWorker<VM>, mmtk: &'static MMTK<VM>) {
1142        trace!("ProcessRootNodes");
1143
1144        #[cfg(feature = "sanity")]
1145        {
1146            if !mmtk.is_in_sanity() {
1147                mmtk.sanity_checker
1148                    .lock()
1149                    .unwrap()
1150                    .add_root_nodes(self.roots.clone());
1151            }
1152        }
1153
1154        let num_roots = self.roots.len();
1155
1156        // This step conceptually traces the edges from root slots to the objects they point to.
1157        // However, VMs that deliver root objects instead of root slots are incapable of updating
1158        // root slots.  Therefore, we call `trace_object` on those objects, and assert the GC
1159        // doesn't move those objects because we cannot store the updated references back to the
1160        // slots.
1161        //
1162        // The `root_objects_to_scan` variable will hold those root objects which are traced for the
1163        // first time.  We will create a work packet for scanning those roots.
1164        let root_objects_to_scan = {
1165            // We create an instance of E to use its `trace_object` method and its object queue.
1166            let mut process_edges_work =
1167                R2OPE::new(vec![], true, mmtk, WorkBucketStage::PinningRootsTrace);
1168            process_edges_work.set_worker(worker);
1169
1170            for object in self.roots.iter().copied() {
1171                let new_object = process_edges_work.trace_object(object);
1172                debug_assert_eq!(
1173                    object, new_object,
1174                    "Object moved while tracing root unmovable root object: {} -> {}",
1175                    object, new_object
1176                );
1177            }
1178
1179            // This contains root objects that are visited the first time.
1180            // It is sufficient to only scan these objects.
1181            process_edges_work.nodes.take()
1182        };
1183
1184        let num_enqueued_nodes = root_objects_to_scan.len();
1185        probe!(mmtk, process_root_nodes, num_roots, num_enqueued_nodes);
1186
1187        if !root_objects_to_scan.is_empty() {
1188            let mut process_edges_work = O2OPE::new(vec![], true, mmtk, self.bucket);
1189            process_edges_work.set_worker(worker);
1190            if let Some(work) = process_edges_work.create_scan_work(root_objects_to_scan) {
1191                crate::memory_manager::add_work_packet(mmtk, self.bucket, work);
1192            }
1193        }
1194
1195        trace!("ProcessRootNodes End");
1196    }
1197}
1198
/// A `ProcessEdgesWork` type that panics when any of its method is used.
/// This is currently used for plans that do not support transitively pinning.
#[derive(Default)]
pub struct UnsupportedProcessEdges<VM: VMBinding> {
    // Marker for the otherwise-unused `VM` type parameter.
    phantom: PhantomData<VM>,
}
1205
1206impl<VM: VMBinding> Deref for UnsupportedProcessEdges<VM> {
1207    type Target = ProcessEdgesBase<VM>;
1208    fn deref(&self) -> &Self::Target {
1209        panic!("unsupported!")
1210    }
1211}
1212
1213impl<VM: VMBinding> DerefMut for UnsupportedProcessEdges<VM> {
1214    fn deref_mut(&mut self) -> &mut Self::Target {
1215        panic!("unsupported!")
1216    }
1217}
1218
1219impl<VM: VMBinding> ProcessEdgesWork for UnsupportedProcessEdges<VM> {
1220    type VM = VM;
1221
1222    type ScanObjectsWorkType = ScanObjects<Self>;
1223
1224    fn new(
1225        _slots: Vec<SlotOf<Self>>,
1226        _roots: bool,
1227        _mmtk: &'static MMTK<Self::VM>,
1228        _bucket: WorkBucketStage,
1229    ) -> Self {
1230        panic!("unsupported!")
1231    }
1232
1233    fn trace_object(&mut self, _object: ObjectReference) -> ObjectReference {
1234        panic!("unsupported!")
1235    }
1236
1237    fn create_scan_work(&self, _nodes: Vec<ObjectReference>) -> Option<Self::ScanObjectsWorkType> {
1238        panic!("unsupported!")
1239    }
1240}