use std::collections::HashSet;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::Ordering;
use std::sync::Mutex;
use std::vec::Vec;

use crate::plan::is_nursery_gc;
use crate::scheduler::ProcessEdgesWork;
use crate::scheduler::WorkBucketStage;
use crate::util::ObjectReference;
use crate::util::VMWorkerThread;
use crate::vm::ReferenceGlue;
use crate::vm::VMBinding;

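/// Holds one [`ReferenceProcessor`] per reference semantics (soft, weak, phantom).
/// Bindings report candidate reference objects through the `add_*_candidate` methods,
/// and the GC driver runs scanning, retaining, forwarding, and enqueueing via the work
/// packets defined at the bottom of this file.
///
/// A minimal sketch of how a binding might report a candidate while scanning an object;
/// the `mmtk` handle, the `reference_object` value, and the call site are assumptions
/// for illustration, not part of this module:
///
/// ```ignore
/// // When the VM discovers a weak-reference object during object scanning:
/// mmtk.reference_processors.add_weak_candidate(reference_object);
/// ```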
pub struct ReferenceProcessors {
    soft: ReferenceProcessor,
    weak: ReferenceProcessor,
    phantom: ReferenceProcessor,
}

impl ReferenceProcessors {
    pub fn new() -> Self {
        ReferenceProcessors {
            soft: ReferenceProcessor::new(Semantics::SOFT),
            weak: ReferenceProcessor::new(Semantics::WEAK),
            phantom: ReferenceProcessor::new(Semantics::PHANTOM),
        }
    }

    pub fn get(&self, semantics: Semantics) -> &ReferenceProcessor {
        match semantics {
            Semantics::SOFT => &self.soft,
            Semantics::WEAK => &self.weak,
            Semantics::PHANTOM => &self.phantom,
        }
    }

    pub fn add_soft_candidate(&self, reff: ObjectReference) {
        trace!("Add soft candidate: {}", reff);
        self.soft.add_candidate(reff);
    }

    pub fn add_weak_candidate(&self, reff: ObjectReference) {
        trace!("Add weak candidate: {}", reff);
        self.weak.add_candidate(reff);
    }

    pub fn add_phantom_candidate(&self, reff: ObjectReference) {
        trace!("Add phantom candidate: {}", reff);
        self.phantom.add_candidate(reff);
    }

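    /// Hand all references that have been queued for enqueueing over to the binding via
    /// `VMReferenceGlue::enqueue_references`, for each semantics in turn.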
    pub fn enqueue_refs<VM: VMBinding>(&self, tls: VMWorkerThread) {
        self.soft.enqueue::<VM>(tls);
        self.weak.enqueue::<VM>(tls);
        self.phantom.enqueue::<VM>(tls);
    }

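    /// Forward the reference tables of all three processors. Only plans whose constraints
    /// set `needs_forward_after_liveness` (typically plans that move objects after the
    /// liveness phase) need this extra step, as the debug assertion below checks.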
    pub fn forward_refs<E: ProcessEdgesWork>(&self, trace: &mut E, mmtk: &'static MMTK<E::VM>) {
        debug_assert!(
            mmtk.get_plan().constraints().needs_forward_after_liveness,
            "A plan with needs_forward_after_liveness=false does not need a separate forward step"
        );
        self.soft
            .forward::<E>(trace, is_nursery_gc(mmtk.get_plan()));
        self.weak
            .forward::<E>(trace, is_nursery_gc(mmtk.get_plan()));
        self.phantom
            .forward::<E>(trace, is_nursery_gc(mmtk.get_plan()));
    }

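    /// Keep the referents of live soft references alive, so that softly reachable objects
    /// survive this collection. Only meaningful for the soft reference processor.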
    pub fn retain_soft_refs<E: ProcessEdgesWork>(&self, trace: &mut E, mmtk: &'static MMTK<E::VM>) {
        self.soft.retain::<E>(trace, is_nursery_gc(mmtk.get_plan()));
    }

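    /// Scan the soft reference table, dropping dead references and queueing references
    /// whose referents have died (see [`ReferenceProcessor::scan`]).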
    pub fn scan_soft_refs<VM: VMBinding>(&self, mmtk: &'static MMTK<VM>) {
        self.soft.scan::<VM>(is_nursery_gc(mmtk.get_plan()));
    }

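    /// Scan the weak reference table, dropping dead references and queueing references
    /// whose referents have died.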
    pub fn scan_weak_refs<VM: VMBinding>(&self, mmtk: &'static MMTK<VM>) {
        self.weak.scan::<VM>(is_nursery_gc(mmtk.get_plan()));
    }

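    /// Scan the phantom reference table, dropping dead references and queueing references
    /// whose referents have died.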
    pub fn scan_phantom_refs<VM: VMBinding>(&self, mmtk: &'static MMTK<VM>) {
        self.phantom.scan::<VM>(is_nursery_gc(mmtk.get_plan()));
    }
}

impl Default for ReferenceProcessors {
    fn default() -> Self {
        Self::new()
    }
}

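/// Initial capacity of the reference table for each semantics.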
const INITIAL_SIZE: usize = 256;

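/// A reference processor for one reference semantics. It maintains the table of known
/// reference objects and implements the scan/retain/forward/enqueue phases driven by the
/// work packets below.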
pub struct ReferenceProcessor {
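    /// The mutable state of the processor, guarded by a mutex because candidates may be
    /// added concurrently.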
    sync: Mutex<ReferenceProcessorSync>,

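    /// Which reference semantics (soft, weak, or phantom) this processor handles.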
    semantics: Semantics,

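    /// Whether `add_candidate` currently accepts new entries. It is cleared at the start
    /// of `enqueue` (and at the end of `forward`) and re-enabled once enqueueing is done.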
    allow_new_candidate: AtomicBool,
}

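/// The semantics of a reference type, mirroring Java-style soft, weak, and phantom
/// references.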
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum Semantics {
    SOFT,
    WEAK,
    PHANTOM,
}

struct ReferenceProcessorSync {
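    /// The table of reference objects currently known to this processor.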
    references: HashSet<ObjectReference>,

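    /// References whose referents were found dead during `scan`; they are passed to the
    /// binding in `enqueue` and then cleared.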
    enqueued_references: Vec<ObjectReference>,

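    /// Presumably marks where nursery references start in the table; it is written in
    /// `new` but not read anywhere in this module.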
    nursery_index: usize,
}

impl ReferenceProcessor {
    pub fn new(semantics: Semantics) -> Self {
        ReferenceProcessor {
            sync: Mutex::new(ReferenceProcessorSync {
                references: HashSet::with_capacity(INITIAL_SIZE),
                enqueued_references: vec![],
                nursery_index: 0,
            }),
            semantics,
            allow_new_candidate: AtomicBool::new(true),
        }
    }

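    /// Add a reference candidate to the table. The call is silently ignored while new
    /// candidates are disallowed (during `enqueue`).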
    pub fn add_candidate(&self, reff: ObjectReference) {
        if !self.allow_new_candidate.load(Ordering::SeqCst) {
            return;
        }

        let mut sync = self.sync.lock().unwrap();
        sync.references.insert(reff);
    }

    fn disallow_new_candidate(&self) {
        self.allow_new_candidate.store(false, Ordering::SeqCst);
    }

    fn allow_new_candidate(&self) {
        self.allow_new_candidate.store(true, Ordering::SeqCst);
    }

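    /// Return the forwarded address of a live referent, or the referent itself if it has
    /// not been moved.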
    fn get_forwarded_referent(referent: ObjectReference) -> ObjectReference {
        debug_assert!(referent.is_live());
        referent.get_forwarded_object().unwrap_or(referent)
    }

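    /// Return the forwarded address of a live reference object, or the object itself if it
    /// has not been moved.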
    fn get_forwarded_reference(object: ObjectReference) -> ObjectReference {
        debug_assert!(object.is_live());
        object.get_forwarded_object().unwrap_or(object)
    }

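    /// Trace a referent so that it is kept alive (used when retaining soft references).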
    fn keep_referent_alive<E: ProcessEdgesWork>(
        e: &mut E,
        referent: ObjectReference,
    ) -> ObjectReference {
        e.trace_object(referent)
    }

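    /// Trace an object during the forwarding phase, returning its (possibly moved)
    /// reference.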
    fn trace_forward_object<E: ProcessEdgesWork>(
        e: &mut E,
        referent: ObjectReference,
    ) -> ObjectReference {
        e.trace_object(referent)
    }

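    /// Pass the collected `enqueued_references` to the binding. New candidates are
    /// disallowed while the hand-off is in progress and re-allowed afterwards. In debug
    /// builds this also sanity-checks that every reference and referent still lies in a
    /// known space and that enqueued references have had their referents cleared.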
    pub fn enqueue<VM: VMBinding>(&self, tls: VMWorkerThread) {
        self.disallow_new_candidate();
        let mut sync = self.sync.lock().unwrap();

        #[cfg(debug_assertions)]
        {
            sync.references.iter().for_each(|reff| {
                debug_assert!(reff.is_in_any_space());
                if let Some(referent) = VM::VMReferenceGlue::get_referent(*reff) {
                    debug_assert!(
                        referent.is_in_any_space(),
                        "Referent {:?} (of reference {:?}) is not in any space",
                        referent,
                        reff
                    );
                }
            });
            sync.enqueued_references.iter().for_each(|reff| {
                debug_assert!(reff.is_in_any_space());
                let maybe_referent = VM::VMReferenceGlue::get_referent(*reff);
                debug_assert!(maybe_referent.is_none());
            });
        }

        if !sync.enqueued_references.is_empty() {
            trace!("enqueue: {:?}", sync.enqueued_references);
            VM::VMReferenceGlue::enqueue_references(&sync.enqueued_references, tls);
            sync.enqueued_references.clear();
        }

        self.allow_new_candidate();
    }

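    /// Forward both the reference table and the enqueued references, updating each
    /// reference object and its referent to their new locations. Used by plans that move
    /// objects after the liveness phase.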
    pub fn forward<E: ProcessEdgesWork>(&self, trace: &mut E, _nursery: bool) {
        let mut sync = self.sync.lock().unwrap();
        debug!("Starting ReferenceProcessor.forward({:?})", self.semantics);

        fn forward_reference<E: ProcessEdgesWork>(
            trace: &mut E,
            reference: ObjectReference,
        ) -> ObjectReference {
            {
                use crate::vm::ObjectModel;
                trace!(
                    "Forwarding reference: {} (size: {})",
                    reference,
                    <E::VM as VMBinding>::VMObjectModel::get_current_size(reference)
                );
            }

            if let Some(old_referent) =
                <E::VM as VMBinding>::VMReferenceGlue::get_referent(reference)
            {
                let new_referent = ReferenceProcessor::trace_forward_object(trace, old_referent);
                <E::VM as VMBinding>::VMReferenceGlue::set_referent(reference, new_referent);

                trace!(
                    " referent: {} (forwarded to {})",
                    old_referent,
                    new_referent
                );
            }

            let new_reference = ReferenceProcessor::trace_forward_object(trace, reference);
            trace!(" reference: forwarded to {}", new_reference);

            new_reference
        }

        sync.references = sync
            .references
            .iter()
            .map(|reff| forward_reference::<E>(trace, *reff))
            .collect();

        sync.enqueued_references = sync
            .enqueued_references
            .iter()
            .map(|reff| forward_reference::<E>(trace, *reff))
            .collect();

        debug!("Ending ReferenceProcessor.forward({:?})", self.semantics);

        self.disallow_new_candidate();
    }

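    /// Process every reference in the table (see `process_reference`), building a new
    /// table that keeps only references still worth tracking, and queueing references
    /// whose referents are dead so they can be enqueued to the binding later.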
    fn scan<VM: VMBinding>(&self, _nursery: bool) {
        let mut sync = self.sync.lock().unwrap();

        debug!("Starting ReferenceProcessor.scan({:?})", self.semantics);

        trace!(
            "{:?} Reference table is {:?}",
            self.semantics,
            sync.references
        );

        let mut enqueued_references = vec![];

        let new_set: HashSet<ObjectReference> = sync
            .references
            .iter()
            .filter_map(|reff| self.process_reference::<VM>(*reff, &mut enqueued_references))
            .collect();

        let num_old = sync.references.len();
        let num_new = new_set.len();
        let num_enqueued = enqueued_references.len();

        debug!(
            "{:?} reference table from {} to {} ({} enqueued)",
            self.semantics, num_old, num_new, num_enqueued,
        );

        let semantics_int = self.semantics as usize;

        probe!(
            mmtk,
            reference_scanned,
            semantics_int,
            num_old,
            num_new,
            num_enqueued
        );

        sync.references = new_set;
        sync.enqueued_references.extend(enqueued_references);

        debug!("Ending ReferenceProcessor.scan({:?})", self.semantics);
    }

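    /// For soft references only: trace the referent of every live reference so that softly
    /// reachable objects are retained in this collection.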
    fn retain<E: ProcessEdgesWork>(&self, trace: &mut E, _nursery: bool) {
        debug_assert!(self.semantics == Semantics::SOFT);

        let sync = self.sync.lock().unwrap();

        debug!("Starting ReferenceProcessor.retain({:?})", self.semantics);
        trace!(
            "{:?} Reference table is {:?}",
            self.semantics,
            sync.references
        );

        let num_refs = sync.references.len();
        let mut num_live = 0usize;
        let mut num_retained = 0usize;

        for reference in sync.references.iter() {
            trace!("Processing reference: {:?}", reference);

            if !reference.is_live() {
                continue;
            }
            num_live += 1;
            if let Some(referent) = <E::VM as VMBinding>::VMReferenceGlue::get_referent(*reference)
            {
                Self::keep_referent_alive(trace, referent);
                num_retained += 1;
                trace!(" ~> {:?} (retained)", referent);
            }
        }

        probe!(mmtk, reference_retained, num_refs, num_live, num_retained);

        debug!("Ending ReferenceProcessor.retain({:?})", self.semantics);
    }

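    /// Process a single reference during `scan`:
    /// * if the reference object itself is dead, clear its referent and drop it;
    /// * if its referent has already been cleared, drop it;
    /// * if the referent is live, update the reference to point to the forwarded referent
    ///   and keep it in the table;
    /// * otherwise clear the referent and queue the reference for enqueueing.
    ///
    /// Returns the (possibly forwarded) reference to keep, or `None` to drop it from the
    /// table.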
    fn process_reference<VM: VMBinding>(
        &self,
        reference: ObjectReference,
        enqueued_references: &mut Vec<ObjectReference>,
    ) -> Option<ObjectReference> {
        trace!("Process reference: {}", reference);

        if !reference.is_live() {
            VM::VMReferenceGlue::clear_referent(reference);
            trace!(" UNREACHABLE reference: {}", reference);
            return None;
        }

        let new_reference = Self::get_forwarded_reference(reference);
        trace!(" forwarded to: {}", new_reference);

        let maybe_old_referent = VM::VMReferenceGlue::get_referent(reference);
        trace!(" referent: {:?}", maybe_old_referent);

        let Some(old_referent) = maybe_old_referent else {
            trace!(" (cleared referent) ");
            return None;
        };

        if old_referent.is_live() {
            let new_referent = Self::get_forwarded_referent(old_referent);
            debug_assert!(new_referent.is_live());
            trace!(" forwarded referent to: {}", new_referent);

            VM::VMReferenceGlue::set_referent(new_reference, new_referent);
            Some(new_reference)
        } else {
            trace!(" UNREACHABLE referent: {}", old_referent);

            VM::VMReferenceGlue::clear_referent(new_reference);
            enqueued_references.push(new_reference);
            None
        }
    }
}

use crate::scheduler::GCWork;
use crate::scheduler::GCWorker;
use crate::MMTK;
use std::marker::PhantomData;

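/// A work packet that rescans the soft and/or weak reference tables. It is used below as
/// a sentinel on the soft-reference closure bucket so that the soft table is scanned once
/// retaining has finished.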
#[derive(Default)]
pub(crate) struct RescanReferences<VM: VMBinding> {
    pub soft: bool,
    pub weak: bool,
    pub phantom_data: PhantomData<VM>,
}

impl<VM: VMBinding> GCWork<VM> for RescanReferences<VM> {
    fn do_work(&mut self, _worker: &mut GCWorker<VM>, mmtk: &'static MMTK<VM>) {
        if self.soft {
            mmtk.reference_processors.scan_soft_refs(mmtk);
        }
        if self.weak {
            mmtk.reference_processors.scan_weak_refs(mmtk);
        }
    }
}

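/// Work packet for the soft reference phase. In a normal collection it first retains soft
/// referents (scheduling a rescan of the table as the bucket's sentinel); in an emergency
/// collection it skips retention and scans the table directly, letting softly reachable
/// objects be collected.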
#[derive(Default)]
pub(crate) struct SoftRefProcessing<E: ProcessEdgesWork>(PhantomData<E>);
impl<E: ProcessEdgesWork> GCWork<E::VM> for SoftRefProcessing<E> {
    fn do_work(&mut self, worker: &mut GCWorker<E::VM>, mmtk: &'static MMTK<E::VM>) {
        if !mmtk.state.is_emergency_collection() {
            let rescan = Box::new(RescanReferences {
                soft: true,
                weak: false,
                phantom_data: PhantomData,
            });
            worker.scheduler().work_buckets[WorkBucketStage::SoftRefClosure].set_sentinel(rescan);

            let mut w = E::new(vec![], false, mmtk, WorkBucketStage::SoftRefClosure);
            w.set_worker(worker);
            mmtk.reference_processors.retain_soft_refs(&mut w, mmtk);
            w.flush();
        } else {
            mmtk.reference_processors.scan_soft_refs(mmtk);
        }
    }
}
impl<E: ProcessEdgesWork> SoftRefProcessing<E> {
    pub fn new() -> Self {
        Self(PhantomData)
    }
}

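/// Work packet that scans the weak reference table.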
#[derive(Default)]
pub(crate) struct WeakRefProcessing<VM: VMBinding>(PhantomData<VM>);
impl<VM: VMBinding> GCWork<VM> for WeakRefProcessing<VM> {
    fn do_work(&mut self, _worker: &mut GCWorker<VM>, mmtk: &'static MMTK<VM>) {
        mmtk.reference_processors.scan_weak_refs(mmtk);
    }
}
impl<VM: VMBinding> WeakRefProcessing<VM> {
    pub fn new() -> Self {
        Self(PhantomData)
    }
}

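/// Work packet that scans the phantom reference table.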
#[derive(Default)]
pub(crate) struct PhantomRefProcessing<VM: VMBinding>(PhantomData<VM>);
impl<VM: VMBinding> GCWork<VM> for PhantomRefProcessing<VM> {
    fn do_work(&mut self, _worker: &mut GCWorker<VM>, mmtk: &'static MMTK<VM>) {
        mmtk.reference_processors.scan_phantom_refs(mmtk);
    }
}
impl<VM: VMBinding> PhantomRefProcessing<VM> {
    pub fn new() -> Self {
        Self(PhantomData)
    }
}

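/// Work packet that forwards the reference tables during the reference-forwarding stage,
/// for plans that move objects after liveness is computed.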
#[derive(Default)]
pub(crate) struct RefForwarding<E: ProcessEdgesWork>(PhantomData<E>);
impl<E: ProcessEdgesWork> GCWork<E::VM> for RefForwarding<E> {
    fn do_work(&mut self, worker: &mut GCWorker<E::VM>, mmtk: &'static MMTK<E::VM>) {
        let mut w = E::new(vec![], false, mmtk, WorkBucketStage::RefForwarding);
        w.set_worker(worker);
        mmtk.reference_processors.forward_refs(&mut w, mmtk);
        w.flush();
    }
}
impl<E: ProcessEdgesWork> RefForwarding<E> {
    pub fn new() -> Self {
        Self(PhantomData)
    }
}

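/// Work packet that hands enqueued references of all semantics back to the binding.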
#[derive(Default)]
pub(crate) struct RefEnqueue<VM: VMBinding>(PhantomData<VM>);
impl<VM: VMBinding> GCWork<VM> for RefEnqueue<VM> {
    fn do_work(&mut self, worker: &mut GCWorker<VM>, mmtk: &'static MMTK<VM>) {
        mmtk.reference_processors.enqueue_refs::<VM>(worker.tls);
    }
}
impl<VM: VMBinding> RefEnqueue<VM> {
    pub fn new() -> Self {
        Self(PhantomData)
    }
}