mmtk/util/reference_processor.rs

use std::collections::HashSet;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::Ordering;
use std::sync::Mutex;
use std::vec::Vec;

use crate::plan::is_nursery_gc;
use crate::scheduler::ProcessEdgesWork;
use crate::scheduler::WorkBucketStage;
use crate::util::ObjectReference;
use crate::util::VMWorkerThread;
use crate::vm::ReferenceGlue;
use crate::vm::VMBinding;

/// Holds a reference processor for each weak reference semantics.
/// Currently this is based on Java's weak reference semantics (soft/weak/phantom).
/// We should make this general rather than Java-specific.
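///
/// # Example (sketch)
///
/// Within mmtk-core, candidates are reported through the per-semantics methods below;
/// bindings normally report candidates through mmtk-core's public API rather than calling
/// these methods directly. A minimal sketch, assuming `weak_ref` is a weak reference object
/// provided by the binding:
///
/// ```ignore
/// // Report a weak reference object so its referent is processed in the next GC.
/// mmtk.reference_processors.add_weak_candidate(weak_ref);
/// ```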
pub struct ReferenceProcessors {
    soft: ReferenceProcessor,
    weak: ReferenceProcessor,
    phantom: ReferenceProcessor,
}

impl ReferenceProcessors {
    pub fn new() -> Self {
        ReferenceProcessors {
            soft: ReferenceProcessor::new(Semantics::SOFT),
            weak: ReferenceProcessor::new(Semantics::WEAK),
            phantom: ReferenceProcessor::new(Semantics::PHANTOM),
        }
    }

    pub fn get(&self, semantics: Semantics) -> &ReferenceProcessor {
        match semantics {
            Semantics::SOFT => &self.soft,
            Semantics::WEAK => &self.weak,
            Semantics::PHANTOM => &self.phantom,
        }
    }

    pub fn add_soft_candidate(&self, reff: ObjectReference) {
        trace!("Add soft candidate: {}", reff);
        self.soft.add_candidate(reff);
    }

    pub fn add_weak_candidate(&self, reff: ObjectReference) {
        trace!("Add weak candidate: {}", reff);
        self.weak.add_candidate(reff);
    }

    pub fn add_phantom_candidate(&self, reff: ObjectReference) {
        trace!("Add phantom candidate: {}", reff);
        self.phantom.add_candidate(reff);
    }

    /// This will invoke enqueue for each reference processor, which will
    /// call back to the VM to enqueue references whose referents are cleared
    /// in this GC.
    pub fn enqueue_refs<VM: VMBinding>(&self, tls: VMWorkerThread) {
        self.soft.enqueue::<VM>(tls);
        self.weak.enqueue::<VM>(tls);
        self.phantom.enqueue::<VM>(tls);
    }

    /// A separate reference forwarding step. Normally forwarding is handled while we scan
    /// references. However, for some plans, such as mark compact, the forwarding addresses are
    /// not yet known when references are scanned, so forwarding cannot happen during the scan.
    /// Those plans require this separate step.
    pub fn forward_refs<E: ProcessEdgesWork>(&self, trace: &mut E, mmtk: &'static MMTK<E::VM>) {
        debug_assert!(
            mmtk.get_plan().constraints().needs_forward_after_liveness,
            "A plan with needs_forward_after_liveness=false does not need a separate forward step"
        );
        self.soft
            .forward::<E>(trace, is_nursery_gc(mmtk.get_plan()));
        self.weak
            .forward::<E>(trace, is_nursery_gc(mmtk.get_plan()));
        self.phantom
            .forward::<E>(trace, is_nursery_gc(mmtk.get_plan()));
    }

    // Methods for scanning weak references. They need to be called in decreasing order of
    // reference strength, i.e. soft > weak > phantom.

    /// Retain the referents of reachable soft references. This expands the transitive closure.
    pub fn retain_soft_refs<E: ProcessEdgesWork>(&self, trace: &mut E, mmtk: &'static MMTK<E::VM>) {
        self.soft.retain::<E>(trace, is_nursery_gc(mmtk.get_plan()));
    }

    /// Scan soft references.
    pub fn scan_soft_refs<VM: VMBinding>(&self, mmtk: &'static MMTK<VM>) {
        // This will update the references (and the referents).
        self.soft.scan::<VM>(is_nursery_gc(mmtk.get_plan()));
    }

    /// Scan weak references.
    pub fn scan_weak_refs<VM: VMBinding>(&self, mmtk: &'static MMTK<VM>) {
        self.weak.scan::<VM>(is_nursery_gc(mmtk.get_plan()));
    }

    /// Scan phantom references.
    pub fn scan_phantom_refs<VM: VMBinding>(&self, mmtk: &'static MMTK<VM>) {
        self.phantom.scan::<VM>(is_nursery_gc(mmtk.get_plan()));
    }
}

impl Default for ReferenceProcessors {
    fn default() -> Self {
        Self::new()
    }
}

// XXX: We differ from the original implementation by ignoring "stress", i.e. where the array
//      of references is grown by 1 each time. We can't do this here because std::vec::Vec
//      doesn't allow us to customize its growth behaviour like that. (Similarly, GROWTH_FACTOR
//      is locked at 2.0, but luckily this is also the value used by Java MMTk.)
const INITIAL_SIZE: usize = 256;

/// We create a reference processor for each semantics. Generally we expect the following to
/// happen for each processor:
/// 1. The VM adds reference candidates, either when a weak reference is created or when a
///    weak reference is traced during GC.
/// 2. We scan references after the GC determines liveness.
/// 3. We forward references if the GC needs forwarding after liveness.
/// 4. We inform the binding of references whose referents are cleared during this GC by
///    enqueueing them.
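///
/// A minimal sketch of how these steps are typically driven during one GC. This mirrors the
/// work packets at the bottom of this file; the actual scheduling is done by the GC scheduler
/// and depends on the plan, so treat this only as an illustration:
///
/// ```ignore
/// // 1. The binding adds candidates via `ReferenceProcessors::add_*_candidate`.
/// // 2. After liveness is determined, scan in decreasing order of reference strength.
/// //    (For non-emergency GCs, `retain_soft_refs` is called first to keep softly
/// //    reachable objects alive before the soft scan.)
/// mmtk.reference_processors.scan_soft_refs(mmtk);
/// mmtk.reference_processors.scan_weak_refs(mmtk);
/// mmtk.reference_processors.scan_phantom_refs(mmtk);
/// // 3. Only for plans with `needs_forward_after_liveness` (e.g. mark compact):
/// mmtk.reference_processors.forward_refs(&mut trace, mmtk);
/// // 4. At the end of the GC, inform the binding of cleared references:
/// mmtk.reference_processors.enqueue_refs::<VM>(worker.tls);
/// ```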
pub struct ReferenceProcessor {
    /// Most of the reference processor state is protected by a mutex.
    sync: Mutex<ReferenceProcessorSync>,

    /// The semantics for the reference processor
    semantics: Semantics,

    /// Is it allowed to add candidates to this reference processor? The value is true for most of the time,
    /// but it is set to false once we finish forwarding references, at which point we do not expect to encounter
    /// any 'new' reference in the same GC. This makes sure that no new entry will be added to our reference table once
    /// we finish forwarding, as we will not be able to process the entry in that GC.
    // This avoids an issue in the following scenario in mark compact:
    // 1. First trace: add a candidate WR.
    // 2. Weak reference scan: scan the reference table. As MC does not forward objects in the first trace, this scan does not update any reference.
    // 3. Second trace: call add_candidate again with WR, but WR gets ignored as we already have WR in our reference table.
    // 4. Weak reference forward: call trace_object for WR, which pushes WR to the node buffer and updates WR -> WR' in our reference table.
    // 5. When we trace objects in the node buffer, we will attempt to add WR as a candidate. As we have updated WR to WR' in our reference
    //    table, we would accept WR as a candidate. But we will not trace WR again, and WR will be invalid after this GC.
    // This flag is set to false after Step 4, so in Step 5, we will ignore adding WR.
    allow_new_candidate: AtomicBool,
}

#[derive(Debug, PartialEq, Clone, Copy)]
pub enum Semantics {
    SOFT,
    WEAK,
    PHANTOM,
}

struct ReferenceProcessorSync {
    /// The table of reference objects for the current semantics. We add references to this table
    /// with add_candidate(). After scanning this table, a reference in the table should either
    /// stay in the table (if the referent is alive) or go to enqueued_references (if the referent
    /// is dead and cleared).
    /// Note that this table should not have duplicate entries, otherwise we will scan the
    /// duplicates multiple times, and that may lead to incorrect results.
    references: HashSet<ObjectReference>,

    /// References whose referents are cleared during this GC. We add references to this table during
    /// scanning, and we pop from this table during the enqueue work at the end of GC.
    enqueued_references: Vec<ObjectReference>,

    /// Index into the references table for the start of nursery objects
    nursery_index: usize,
}

impl ReferenceProcessor {
    pub fn new(semantics: Semantics) -> Self {
        ReferenceProcessor {
            sync: Mutex::new(ReferenceProcessorSync {
                references: HashSet::with_capacity(INITIAL_SIZE),
                enqueued_references: vec![],
                nursery_index: 0,
            }),
            semantics,
            allow_new_candidate: AtomicBool::new(true),
        }
    }

    /// Add a candidate. This is a no-op if adding new candidates is currently disallowed
    /// (see `allow_new_candidate`).
    pub fn add_candidate(&self, reff: ObjectReference) {
        if !self.allow_new_candidate.load(Ordering::SeqCst) {
            return;
        }

        let mut sync = self.sync.lock().unwrap();
        sync.references.insert(reff);
    }

    fn disallow_new_candidate(&self) {
        self.allow_new_candidate.store(false, Ordering::SeqCst);
    }

    fn allow_new_candidate(&self) {
        self.allow_new_candidate.store(true, Ordering::SeqCst);
    }

    // These functions call `ObjectReference::get_forwarded_object`, not `trace_object()`.
    // They are used by steps that do not expand the transitive closure.  Processing weak and
    // phantom references never expands the transitive closure.  Soft references, when not retained,
    // do not expand the transitive closure, either.
    // These functions are intended to make the code easier to understand.

    /// Return the new `ObjectReference` of a referent if it is already moved, or its current
    /// `ObjectReference` otherwise.  The referent must be live when calling this function.
    fn get_forwarded_referent(referent: ObjectReference) -> ObjectReference {
        debug_assert!(referent.is_live());
        referent.get_forwarded_object().unwrap_or(referent)
    }

    /// Return the new `ObjectReference` of a reference object if it is already moved, or its
    /// current `ObjectReference` otherwise.  The reference object must be live when calling this
    /// function.
    fn get_forwarded_reference(object: ObjectReference) -> ObjectReference {
        debug_assert!(object.is_live());
        object.get_forwarded_object().unwrap_or(object)
    }

    // These functions call `trace_object()`, which will ensure the object and its descendants will
    // be traced.  They are only called in steps that expand the transitive closure.  Those include
    // retaining soft references, and (for MarkCompact) tracing objects for forwarding.
    // Note that finalizers also expand the transitive closure.
    // These functions are intended to make the code easier to understand.

    /// This function is called when retaining soft references.  It
    /// -   keeps the referent alive, and
    /// -   adds the referent to the tracing queue if not yet reached, so that its children will be
    ///     kept alive, too, and
    /// -   gets the new object reference of the referent if it is moved.
    fn keep_referent_alive<E: ProcessEdgesWork>(
        e: &mut E,
        referent: ObjectReference,
    ) -> ObjectReference {
        e.trace_object(referent)
    }

    /// This function is called when forwarding the references and referents (for MarkCompact). It
    /// -   adds the reference or the referent to the tracing queue if not yet reached, so that
    ///     the children of the reference or referent will be visited and forwarded, too, and
    /// -   gets the forwarded object reference of the object.
    fn trace_forward_object<E: ProcessEdgesWork>(
        e: &mut E,
        referent: ObjectReference,
    ) -> ObjectReference {
        e.trace_object(referent)
    }

    /// Inform the binding to enqueue the weak references whose referents were cleared in this GC.
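    ///
    /// A sketch of what a binding's `ReferenceGlue::enqueue_references` implementation might do
    /// with the references it receives here. The `vm_reference_queue_push` helper is hypothetical
    /// and only illustrates handing the cleared references back to the VM (e.g. to a
    /// `java.lang.ref.ReferenceQueue` in a JVM binding):
    ///
    /// ```ignore
    /// fn enqueue_references(references: &[ObjectReference], _tls: VMWorkerThread) {
    ///     for reff in references {
    ///         // Hypothetical VM-side call that makes the cleared reference visible to mutators.
    ///         vm_reference_queue_push(*reff);
    ///     }
    /// }
    /// ```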
    pub fn enqueue<VM: VMBinding>(&self, tls: VMWorkerThread) {
        // We will acquire a lock below. If anyone tries to insert new weak refs, which would acquire the same lock, a deadlock will occur.
        // This does happen for OpenJDK with ConcurrentImmix, where a write barrier is triggered during the enqueueing of weak references,
        // and the write barrier scans the objects and attempts to add new weak references.
        // Disallow new candidates to prevent the deadlock.
        self.disallow_new_candidate();
        let mut sync = self.sync.lock().unwrap();

        // This is the end of a GC. We do some assertions here to make sure our reference tables are correct.
        #[cfg(debug_assertions)]
        {
            // For references in the table, the reference needs to be valid, and if the referent is not cleared, it should be valid as well.
            sync.references.iter().for_each(|reff| {
                debug_assert!(reff.is_in_any_space());
                if let Some(referent) = VM::VMReferenceGlue::get_referent(*reff) {
                    debug_assert!(
                        referent.is_in_any_space(),
                        "Referent {:?} (of reference {:?}) is not in any space",
                        referent,
                        reff
                    );
                }
            });
            // For references that will be enqueued, the reference needs to be valid, and the referent needs to be cleared.
            sync.enqueued_references.iter().for_each(|reff| {
                debug_assert!(reff.is_in_any_space());
                let maybe_referent = VM::VMReferenceGlue::get_referent(*reff);
                debug_assert!(maybe_referent.is_none());
            });
        }

        if !sync.enqueued_references.is_empty() {
            trace!("enqueue: {:?}", sync.enqueued_references);
            VM::VMReferenceGlue::enqueue_references(&sync.enqueued_references, tls);
            sync.enqueued_references.clear();
        }

        self.allow_new_candidate();
    }

    /// Forward the reference tables in the reference processor. This is only needed if a plan does
    /// not forward objects in its first transitive closure.
    /// The `nursery` argument is not used.
    pub fn forward<E: ProcessEdgesWork>(&self, trace: &mut E, _nursery: bool) {
        let mut sync = self.sync.lock().unwrap();
        debug!("Starting ReferenceProcessor.forward({:?})", self.semantics);

        // Forward a single reference
        fn forward_reference<E: ProcessEdgesWork>(
            trace: &mut E,
            reference: ObjectReference,
        ) -> ObjectReference {
            {
                use crate::vm::ObjectModel;
                trace!(
                    "Forwarding reference: {} (size: {})",
                    reference,
                    <E::VM as VMBinding>::VMObjectModel::get_current_size(reference)
                );
            }

            if let Some(old_referent) =
                <E::VM as VMBinding>::VMReferenceGlue::get_referent(reference)
            {
                let new_referent = ReferenceProcessor::trace_forward_object(trace, old_referent);
                <E::VM as VMBinding>::VMReferenceGlue::set_referent(reference, new_referent);

                trace!(
                    " referent: {} (forwarded to {})",
                    old_referent,
                    new_referent
                );
            }

            let new_reference = ReferenceProcessor::trace_forward_object(trace, reference);
            trace!(" reference: forwarded to {}", new_reference);

            new_reference
        }

        sync.references = sync
            .references
            .iter()
            .map(|reff| forward_reference::<E>(trace, *reff))
            .collect();

        sync.enqueued_references = sync
            .enqueued_references
            .iter()
            .map(|reff| forward_reference::<E>(trace, *reff))
            .collect();

        debug!("Ending ReferenceProcessor.forward({:?})", self.semantics);

        // We have finished forwarding. No longer accept new candidates.
        self.disallow_new_candidate();
    }

    /// Scan the reference table, and update each reference/referent.
    /// It doesn't keep the reference or the referent alive.
    // TODO: nursery is currently ignored. We used to use a Vec for the reference table, and an
    // index to point to the reference that we last scanned. However, now that we use a HashSet
    // for the reference table, we can no longer do that.
    fn scan<VM: VMBinding>(&self, _nursery: bool) {
        let mut sync = self.sync.lock().unwrap();

        debug!("Starting ReferenceProcessor.scan({:?})", self.semantics);

        trace!(
            "{:?} Reference table is {:?}",
            self.semantics,
            sync.references
        );

        //debug_assert!(sync.enqueued_references.is_empty());
        // Put enqueued references in this vec
        let mut enqueued_references = vec![];

        // Determine liveness for each reference, and only keep the refs for which `process_reference()` returns Some.
        let new_set: HashSet<ObjectReference> = sync
            .references
            .iter()
            .filter_map(|reff| self.process_reference::<VM>(*reff, &mut enqueued_references))
            .collect();

        let num_old = sync.references.len();
        let num_new = new_set.len();
        let num_enqueued = enqueued_references.len();

        debug!(
            "{:?} reference table from {} to {} ({} enqueued)",
            self.semantics, num_old, num_new, num_enqueued,
        );

        let semantics_int = self.semantics as usize;

        probe!(
            mmtk,
            reference_scanned,
            semantics_int,
            num_old,
            num_new,
            num_enqueued
        );

        sync.references = new_set;
        sync.enqueued_references.extend(enqueued_references);

        debug!("Ending ReferenceProcessor.scan({:?})", self.semantics);
    }

    /// Retain the referents of soft references in the reference table. This method deals only
    /// with soft references. It retains the referent if the reference itself is definitely
    /// reachable. This method does not update the reference or the referent, so scan() should
    /// be used afterwards to update the references/referents.
    fn retain<E: ProcessEdgesWork>(&self, trace: &mut E, _nursery: bool) {
        debug_assert!(self.semantics == Semantics::SOFT);

        let sync = self.sync.lock().unwrap();

        debug!("Starting ReferenceProcessor.retain({:?})", self.semantics);
        trace!(
            "{:?} Reference table is {:?}",
            self.semantics,
            sync.references
        );

        let num_refs = sync.references.len();
        let mut num_live = 0usize;
        let mut num_retained = 0usize;

        for reference in sync.references.iter() {
            trace!("Processing reference: {:?}", reference);

            if !reference.is_live() {
                // Reference is currently unreachable but may become reachable by the
                // following trace. We postpone the decision.
                continue;
            }
            num_live += 1;
            // Reference is definitely reachable.  Retain the referent.
            if let Some(referent) = <E::VM as VMBinding>::VMReferenceGlue::get_referent(*reference)
            {
                Self::keep_referent_alive(trace, referent);
                num_retained += 1;
                trace!(" ~> {:?} (retained)", referent);
            }
        }

        probe!(mmtk, reference_retained, num_refs, num_live, num_retained);

        debug!("Ending ReferenceProcessor.retain({:?})", self.semantics);
    }

    /// Process a reference.
    /// * If both the reference and the referent are alive, update the referent properly and return the updated reference.
    /// * If the reference is alive but the referent is not cleared and not alive, clear the referent, enqueue the reference, and return None.
    /// * For other cases, return None.
    ///
    /// If a None value is returned, the reference can be removed from the reference table. Otherwise, the updated reference should be kept
    /// in the reference table.
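    ///
    /// In summary, the cases handled below are:
    ///
    /// ```text
    /// reference dead                            -> clear referent, drop from table
    /// reference live, referent already cleared  -> drop from table (not enqueued)
    /// reference live, referent live             -> update reference/referent, keep in table
    /// reference live, referent dead             -> clear referent, enqueue, drop from table
    /// ```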
    fn process_reference<VM: VMBinding>(
        &self,
        reference: ObjectReference,
        enqueued_references: &mut Vec<ObjectReference>,
    ) -> Option<ObjectReference> {
        trace!("Process reference: {}", reference);

        // If the reference is dead, we're done with it. Let it (and
        // possibly its referent) be garbage-collected.
        if !reference.is_live() {
            VM::VMReferenceGlue::clear_referent(reference);
            trace!(" UNREACHABLE reference: {}", reference);
            return None;
        }

        // The reference object is live.
        let new_reference = Self::get_forwarded_reference(reference);
        trace!(" forwarded to: {}", new_reference);

        // Get the old referent.
        let maybe_old_referent = VM::VMReferenceGlue::get_referent(reference);
        trace!(" referent: {:?}", maybe_old_referent);

        // If the application has cleared the referent, the Java spec says
        // this does not cause the Reference object to be enqueued. We
        // simply allow the Reference object to fall out of our
        // waiting list.
        let Some(old_referent) = maybe_old_referent else {
            trace!("  (cleared referent) ");
            return None;
        };

        if old_referent.is_live() {
            // Referent is still reachable in a way that is as strong as
            // or stronger than the current reference level.
            let new_referent = Self::get_forwarded_referent(old_referent);
            debug_assert!(new_referent.is_live());
            trace!("  forwarded referent to: {}", new_referent);

            // The reference object stays on the waiting list, and the
            // referent is untouched. The only thing we must do is
            // ensure that the former addresses are updated with the
            // new forwarding addresses in case the collector is a
            // copying collector.

            // Update the referent
            VM::VMReferenceGlue::set_referent(new_reference, new_referent);
            Some(new_reference)
        } else {
            // Referent is unreachable. Clear the referent and enqueue the reference object.
            trace!("  UNREACHABLE referent: {}", old_referent);

            VM::VMReferenceGlue::clear_referent(new_reference);
            enqueued_references.push(new_reference);
            None
        }
    }
}

use crate::scheduler::GCWork;
use crate::scheduler::GCWorker;
use crate::MMTK;
use std::marker::PhantomData;

/// Rescan (soft and/or weak) references. This is scheduled as the sentinel of the soft reference
/// closure bucket so that the scan happens after the transitive closure from retained soft
/// referents has been computed.
#[derive(Default)]
pub(crate) struct RescanReferences<VM: VMBinding> {
    pub soft: bool,
    pub weak: bool,
    pub phantom_data: PhantomData<VM>,
}

impl<VM: VMBinding> GCWork<VM> for RescanReferences<VM> {
    fn do_work(&mut self, _worker: &mut GCWorker<VM>, mmtk: &'static MMTK<VM>) {
        if self.soft {
            mmtk.reference_processors.scan_soft_refs(mmtk);
        }
        if self.weak {
            mmtk.reference_processors.scan_weak_refs(mmtk);
        }
    }
}

/// Process soft references. For a non-emergency GC, this retains softly reachable referents
/// (expanding the transitive closure) and defers the actual scan to a `RescanReferences`
/// sentinel; for an emergency GC, it scans soft references immediately without retaining.
#[derive(Default)]
pub(crate) struct SoftRefProcessing<E: ProcessEdgesWork>(PhantomData<E>);
impl<E: ProcessEdgesWork> GCWork<E::VM> for SoftRefProcessing<E> {
    fn do_work(&mut self, worker: &mut GCWorker<E::VM>, mmtk: &'static MMTK<E::VM>) {
        if !mmtk.state.is_emergency_collection() {
            // Postpone the scanning to the end of the transitive closure from strongly reachable
            // soft references.
            let rescan = Box::new(RescanReferences {
                soft: true,
                weak: false,
                phantom_data: PhantomData,
            });
            worker.scheduler().work_buckets[WorkBucketStage::SoftRefClosure].set_sentinel(rescan);

            // Retain soft references.  This will expand the transitive closure.  We create an
            // instance of `E` for this.
            let mut w = E::new(vec![], false, mmtk, WorkBucketStage::SoftRefClosure);
            w.set_worker(worker);
            mmtk.reference_processors.retain_soft_refs(&mut w, mmtk);
            w.flush();
        } else {
            // Scan soft references immediately without retaining.
            mmtk.reference_processors.scan_soft_refs(mmtk);
        }
    }
}
impl<E: ProcessEdgesWork> SoftRefProcessing<E> {
    pub fn new() -> Self {
        Self(PhantomData)
    }
}

/// Scan weak references.
#[derive(Default)]
pub(crate) struct WeakRefProcessing<VM: VMBinding>(PhantomData<VM>);
impl<VM: VMBinding> GCWork<VM> for WeakRefProcessing<VM> {
    fn do_work(&mut self, _worker: &mut GCWorker<VM>, mmtk: &'static MMTK<VM>) {
        mmtk.reference_processors.scan_weak_refs(mmtk);
    }
}
impl<VM: VMBinding> WeakRefProcessing<VM> {
    pub fn new() -> Self {
        Self(PhantomData)
    }
}

/// Scan phantom references.
#[derive(Default)]
pub(crate) struct PhantomRefProcessing<VM: VMBinding>(PhantomData<VM>);
impl<VM: VMBinding> GCWork<VM> for PhantomRefProcessing<VM> {
    fn do_work(&mut self, _worker: &mut GCWorker<VM>, mmtk: &'static MMTK<VM>) {
        mmtk.reference_processors.scan_phantom_refs(mmtk);
    }
}
impl<VM: VMBinding> PhantomRefProcessing<VM> {
    pub fn new() -> Self {
        Self(PhantomData)
    }
}

/// Forward references, for plans that need a separate forwarding step after liveness
/// (see `ReferenceProcessors::forward_refs`).
#[derive(Default)]
pub(crate) struct RefForwarding<E: ProcessEdgesWork>(PhantomData<E>);
impl<E: ProcessEdgesWork> GCWork<E::VM> for RefForwarding<E> {
    fn do_work(&mut self, worker: &mut GCWorker<E::VM>, mmtk: &'static MMTK<E::VM>) {
        let mut w = E::new(vec![], false, mmtk, WorkBucketStage::RefForwarding);
        w.set_worker(worker);
        mmtk.reference_processors.forward_refs(&mut w, mmtk);
        w.flush();
    }
}
impl<E: ProcessEdgesWork> RefForwarding<E> {
    pub fn new() -> Self {
        Self(PhantomData)
    }
}

/// Let the binding enqueue references whose referents were cleared in this GC.
#[derive(Default)]
pub(crate) struct RefEnqueue<VM: VMBinding>(PhantomData<VM>);
impl<VM: VMBinding> GCWork<VM> for RefEnqueue<VM> {
    fn do_work(&mut self, worker: &mut GCWorker<VM>, mmtk: &'static MMTK<VM>) {
        mmtk.reference_processors.enqueue_refs::<VM>(worker.tls);
    }
}
impl<VM: VMBinding> RefEnqueue<VM> {
    pub fn new() -> Self {
        Self(PhantomData)
    }
}