mmtk/plan/global.rs
//! The global part of a plan implementation.

use super::PlanConstraints;
use crate::global_state::GlobalState;
use crate::mmtk::MMTK;
use crate::plan::gc_work::{ClearCommonPlanUnlogBits, SetCommonPlanUnlogBits};
use crate::plan::tracing::ObjectQueue;
use crate::plan::Mutator;
use crate::policy::immortalspace::ImmortalSpace;
use crate::policy::largeobjectspace::LargeObjectSpace;
use crate::policy::space::{PlanCreateSpaceArgs, Space};
#[cfg(feature = "vm_space")]
use crate::policy::vmspace::VMSpace;
use crate::scheduler::*;
use crate::util::alloc::allocators::AllocatorSelector;
use crate::util::copy::{CopyConfig, GCWorkerCopyContext};
use crate::util::heap::gc_trigger::GCTrigger;
use crate::util::heap::gc_trigger::SpaceStats;
use crate::util::heap::layout::Mmapper;
use crate::util::heap::layout::VMMap;
use crate::util::heap::HeapMeta;
use crate::util::heap::VMRequest;
use crate::util::metadata::log_bit::UnlogBitsOperation;
use crate::util::metadata::side_metadata::SideMetadataSanity;
use crate::util::metadata::side_metadata::SideMetadataSpec;
use crate::util::options::Options;
use crate::util::options::PlanSelector;
use crate::util::statistics::stats::Stats;
use crate::util::{conversions, ObjectReference};
use crate::util::{VMMutatorThread, VMWorkerThread};
use crate::vm::*;
use downcast_rs::Downcast;
use enum_map::EnumMap;
use std::sync::atomic::Ordering;
use std::sync::Arc;

use mmtk_macros::{HasSpaces, PlanTraceObject};

pub fn create_mutator<VM: VMBinding>(
    tls: VMMutatorThread,
    mmtk: &'static MMTK<VM>,
) -> Box<Mutator<VM>> {
    Box::new(match *mmtk.options.plan {
        PlanSelector::NoGC => crate::plan::nogc::mutator::create_nogc_mutator(tls, mmtk),
        PlanSelector::SemiSpace => crate::plan::semispace::mutator::create_ss_mutator(tls, mmtk),
        PlanSelector::GenCopy => {
            crate::plan::generational::copying::mutator::create_gencopy_mutator(tls, mmtk)
        }
        PlanSelector::GenImmix => {
            crate::plan::generational::immix::mutator::create_genimmix_mutator(tls, mmtk)
        }
        PlanSelector::MarkSweep => crate::plan::marksweep::mutator::create_ms_mutator(tls, mmtk),
        PlanSelector::Immix => crate::plan::immix::mutator::create_immix_mutator(tls, mmtk),
        PlanSelector::PageProtect => {
            crate::plan::pageprotect::mutator::create_pp_mutator(tls, mmtk)
        }
        PlanSelector::MarkCompact => {
            crate::plan::markcompact::mutator::create_markcompact_mutator(tls, mmtk)
        }
        PlanSelector::StickyImmix => {
            crate::plan::sticky::immix::mutator::create_stickyimmix_mutator(tls, mmtk)
        }
        PlanSelector::ConcurrentImmix => {
            crate::plan::concurrent::immix::mutator::create_concurrent_immix_mutator(tls, mmtk)
        }
        PlanSelector::Compressor => {
            crate::plan::compressor::mutator::create_compressor_mutator(tls, mmtk)
        }
    })
}

pub fn create_plan<VM: VMBinding>(
    plan: PlanSelector,
    args: CreateGeneralPlanArgs<VM>,
) -> Box<dyn Plan<VM = VM>> {
    let plan = match plan {
        PlanSelector::NoGC => {
            Box::new(crate::plan::nogc::NoGC::new(args)) as Box<dyn Plan<VM = VM>>
        }
        PlanSelector::SemiSpace => {
            Box::new(crate::plan::semispace::SemiSpace::new(args)) as Box<dyn Plan<VM = VM>>
        }
        PlanSelector::GenCopy => Box::new(crate::plan::generational::copying::GenCopy::new(args))
            as Box<dyn Plan<VM = VM>>,
        PlanSelector::GenImmix => Box::new(crate::plan::generational::immix::GenImmix::new(args))
            as Box<dyn Plan<VM = VM>>,
        PlanSelector::MarkSweep => {
            Box::new(crate::plan::marksweep::MarkSweep::new(args)) as Box<dyn Plan<VM = VM>>
        }
        PlanSelector::Immix => {
            Box::new(crate::plan::immix::Immix::new(args)) as Box<dyn Plan<VM = VM>>
        }
        PlanSelector::PageProtect => {
            Box::new(crate::plan::pageprotect::PageProtect::new(args)) as Box<dyn Plan<VM = VM>>
        }
        PlanSelector::MarkCompact => {
            Box::new(crate::plan::markcompact::MarkCompact::new(args)) as Box<dyn Plan<VM = VM>>
        }
        PlanSelector::StickyImmix => {
            Box::new(crate::plan::sticky::immix::StickyImmix::new(args)) as Box<dyn Plan<VM = VM>>
        }
        PlanSelector::ConcurrentImmix => {
            Box::new(crate::plan::concurrent::immix::ConcurrentImmix::new(args))
                as Box<dyn Plan<VM = VM>>
        }
        PlanSelector::Compressor => {
            Box::new(crate::plan::compressor::Compressor::new(args)) as Box<dyn Plan<VM = VM>>
        }
    };

    // We have created the plan on the heap, and we won't explicitly move it.
    // Each space now has a fixed address for its lifetime. It is safe now to initialize the SFT.
    let sft_map: &mut dyn crate::policy::sft_map::SFTMap =
        unsafe { crate::mmtk::SFT_MAP.get_mut() }.as_mut();
    plan.for_each_space(&mut |s| {
        sft_map.notify_space_creation(s.as_sft());
        s.initialize_sft(sft_map);
    });

    plan
}

/// Create a thread-local GC worker copy context.
pub fn create_gc_worker_context<VM: VMBinding>(
    tls: VMWorkerThread,
    mmtk: &'static MMTK<VM>,
) -> GCWorkerCopyContext<VM> {
    GCWorkerCopyContext::<VM>::new(tls, mmtk, mmtk.get_plan().create_copy_config())
}

/// A plan describes the global core functionality for all memory management schemes.
/// All global MMTk plans should implement this trait.
///
/// The global instance defines and manages static resources
/// (such as memory and virtual memory resources).
///
/// Constructor:
///
/// For the constructor of a new plan, there are a few things the constructor _must_ do
/// (please check existing plans and see what they do in the constructor):
/// 1. Create a `HeapMeta`, and use this `HeapMeta` to initialize all the spaces.
/// 2. Create a vector of all the side metadata specs with `SideMetadataContext::new_global_specs()`.
///    The parameter is a vector of global side metadata specs that are specific to the plan.
/// 3. Initialize all the spaces the plan uses with the heap meta and the global metadata specs vector.
/// 4. Invoke the `verify_side_metadata_sanity()` method of the plan.
///    It will create a `SideMetadataSanity` object, and invoke `verify_side_metadata_sanity()` for each space (or
///    invoke `verify_side_metadata_sanity()` in `CommonPlan`/`BasePlan` for the spaces in the common/base plan).
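///
/// As an illustration, a minimal constructor might look like the following sketch. The plan
/// `MyPlan`, its space field, and `MYPLAN_CONSTRAINTS` are hypothetical; see existing plans
/// such as `SemiSpace` for real examples.
/// ```ignore
/// pub fn new(args: CreateGeneralPlanArgs<VM>) -> Self {
///     // Step 2: collect the plan-specific global side metadata specs.
///     let mut plan_args = CreateSpecificPlanArgs {
///         global_args: args,
///         constraints: &MYPLAN_CONSTRAINTS,
///         global_side_metadata_specs: SideMetadataContext::new_global_specs(&[]),
///     };
///     // Steps 1 and 3: create each space from the plan args, which carry the
///     // HeapMeta and the global side metadata specs.
///     let res = MyPlan {
///         space: MySpace::new(plan_args.get_normal_space_args(
///             "my_space",
///             true,
///             false,
///             VMRequest::discontiguous(),
///         )),
///         common: CommonPlan::new(plan_args),
///     };
///     // Step 4: verify side metadata sanity for all spaces.
///     res.verify_side_metadata_sanity();
///     res
/// }
/// ```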
///
/// Methods in this trait:
///
/// Only methods that will be overridden by each specific plan should be included in this trait. The trait may
/// provide a default implementation, and each plan can override the implementation. For methods that won't be
/// overridden, we should implement those methods in `BasePlan` (or `CommonPlan`) and call them from there instead.
/// We should avoid having methods with the same name in both `Plan` and `BasePlan`, as this may confuse people, and
/// they may call the wrong method by mistake.
// TODO: Some methods that are not overridden can be moved from the trait to BasePlan.
pub trait Plan: 'static + HasSpaces + Sync + Downcast {
    /// Get the plan constraints for the plan.
    /// This returns a non-constant value. A constant value can be found in each plan's module if needed.
    fn constraints(&self) -> &'static PlanConstraints;

    /// Create a copy config for this plan. A copying GC plan MUST override this method,
    /// and provide a valid config.
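    ///
    /// As an illustration, a semispace-style override might look like the following sketch,
    /// modeled on the `SemiSpace` plan (`self.tospace()` and `SS_CONSTRAINTS` are assumptions
    /// borrowed from that plan):
    /// ```ignore
    /// fn create_copy_config(&'static self) -> CopyConfig<Self::VM> {
    ///     use enum_map::enum_map;
    ///     CopyConfig {
    ///         // All default-copy objects are copied by copy allocator 0 ...
    ///         copy_mapping: enum_map! {
    ///             CopySemantics::DefaultCopy => CopySelector::CopySpace(0),
    ///             _ => CopySelector::Unused,
    ///         },
    ///         // ... which allocates into the current to-space.
    ///         space_mapping: vec![(CopySelector::CopySpace(0), self.tospace())],
    ///         constraints: &SS_CONSTRAINTS,
    ///     }
    /// }
    /// ```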
    fn create_copy_config(&'static self) -> CopyConfig<Self::VM> {
        // Use the empty default copy config for non-copying GCs.
        CopyConfig::default()
    }

    /// Get an immutable reference to the base plan. `BasePlan` is included by all the MMTk GC plans.
    fn base(&self) -> &BasePlan<Self::VM>;

    /// Get a mutable reference to the base plan. `BasePlan` is included by all the MMTk GC plans.
    fn base_mut(&mut self) -> &mut BasePlan<Self::VM>;

    /// Schedule work for the upcoming GC.
    fn schedule_collection(&'static self, _scheduler: &GCWorkScheduler<Self::VM>);

    /// Get the common plan. `CommonPlan` is included by most MMTk GC plans.
    fn common(&self) -> &CommonPlan<Self::VM> {
        panic!("Common Plan not handled!")
    }

    /// Return a reference to `GenerationalPlan` to allow access to methods specific to
    /// generational plans if the plan is a generational plan.
    fn generational(
        &self,
    ) -> Option<&dyn crate::plan::generational::global::GenerationalPlan<VM = Self::VM>> {
        None
    }

    /// Return a reference to `ConcurrentPlan` to allow access to methods specific to
    /// concurrent plans if the plan is a concurrent plan.
    fn concurrent(
        &self,
    ) -> Option<&dyn crate::plan::concurrent::global::ConcurrentPlan<VM = Self::VM>> {
        None
    }

    /// Get the current runtime options.
    fn options(&self) -> &Options {
        &self.base().options
    }

    /// Get the allocator mapping between [`crate::AllocationSemantics`] and [`crate::util::alloc::AllocatorSelector`].
    /// This defines what space this plan will allocate objects into for different semantics.
    fn get_allocator_mapping(&self) -> &'static EnumMap<AllocationSemantics, AllocatorSelector>;

    /// Called when all mutators are paused. This is called before prepare.
    fn notify_mutators_paused(&self, _scheduler: &GCWorkScheduler<Self::VM>) {}

    /// Prepare the plan before a GC. This is invoked as an initial step in the GC.
    /// This is invoked once per GC by one worker thread. `tls` is the worker thread that executes this method.
    fn prepare(&mut self, tls: VMWorkerThread);

    /// Prepare a worker for a GC. Each worker has its own prepare method. This hook is for plan-specific
    /// per-worker preparation. This method is invoked once per worker by the worker thread passed as the argument.
    fn prepare_worker(&self, _worker: &mut GCWorker<Self::VM>) {}

    /// Release the plan after transitive closure. A plan can implement this method to call each policy's release,
    /// or create any work packet that should be done in release.
    /// This is invoked once per GC by one worker thread. `tls` is the worker thread that executes this method.
    fn release(&mut self, tls: VMWorkerThread);

    /// Inform the plan about the end of a GC. It is guaranteed that there is no further work for this GC.
    /// This is invoked once per GC by one worker thread. `tls` is the worker thread that executes this method.
    // TODO: This is actually called at the end of a pause/STW, rather than the end of a GC. It should be renamed.
    fn end_of_gc(&mut self, _tls: VMWorkerThread);

    /// Notify the plan that an emergency collection will happen. The plan should try to free as much memory as possible.
    /// The default implementation will force a full-heap collection for generational plans.
    fn notify_emergency_collection(&self) {
        if let Some(gen) = self.generational() {
            gen.force_full_heap_collection();
        }
    }

    /// Ask the plan if it would trigger a GC. If MMTk is in charge of triggering GCs, this method is called
    /// periodically during allocation. However, MMTk may delegate the GC triggering decision to the runtime,
    /// in which case, this method may not be called. This method returns true to trigger a collection.
    ///
    /// # Arguments
    /// * `space_full`: an allocation to a specific space failed, and the GC must recover pages within that space.
    /// * `space`: an option to indicate if there is a space that has failed in an allocation.
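    ///
    /// As an illustration, a plan with no plan-specific trigger condition typically delegates
    /// to `BasePlan::collection_required` (defined later in this file), which checks the
    /// stress-GC option and whether the heap is full. A sketch of the common pattern:
    /// ```ignore
    /// fn collection_required(&self, space_full: bool, _space: Option<SpaceStats<Self::VM>>) -> bool {
    ///     self.base().collection_required(self, space_full)
    /// }
    /// ```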
    fn collection_required(&self, space_full: bool, space: Option<SpaceStats<Self::VM>>) -> bool;

    // Note: The following methods are about page accounting. The default implementation should
    // work fine for non-copying plans. For copying plans, the plan should override any of these methods
    // if necessary.

    /// Get the number of pages that are reserved, including pages used by MMTk spaces, pages that
    /// will be used (e.g. for copying), and live pages allocated outside MMTk spaces as reported
    /// by the VM binding.
    fn get_reserved_pages(&self) -> usize {
        let used_pages = self.get_used_pages();
        let collection_reserve = self.get_collection_reserved_pages();
        let vm_live_bytes = <Self::VM as VMBinding>::VMCollection::vm_live_bytes();
        // Note that `vm_live_bytes` may not be the exact number of bytes in whole pages. The VM
        // binding is allowed to return an approximate value if it is expensive or impossible to
        // compute the exact number of pages occupied.
        let vm_live_pages = conversions::bytes_to_pages_up(vm_live_bytes);
        let total = used_pages + collection_reserve + vm_live_pages;

        trace!(
            "Reserved pages = {}, used pages: {}, collection reserve: {}, VM live pages: {}",
            total,
            used_pages,
            collection_reserve,
            vm_live_pages,
        );

        total
    }

    /// Get the total number of pages for the heap.
    fn get_total_pages(&self) -> usize {
        self.base()
            .gc_trigger
            .policy
            .get_current_heap_size_in_pages()
    }

    /// Get the number of pages that are still available for use. The available pages
    /// are always non-negative.
    fn get_available_pages(&self) -> usize {
        let reserved_pages = self.get_reserved_pages();
        let total_pages = self.get_total_pages();

        // It is possible that the reserved pages are larger than the total pages, so we are doing
        // a saturating subtraction to make sure we return a non-negative number.
        // For example,
        // 1. our GC trigger checks if reserved pages is more than total pages.
        // 2. when the heap is almost full of live objects (such as in the case of an OOM) and we are doing a copying GC,
        //    it is possible that the reserved pages are larger than the total pages after the copying GC (the reserved
        //    pages after a GC may be larger than the reserved pages before a GC, as we may end up using more memory
        //    for thread-local buffers for copy allocators).
        // 3. the binding disabled GC, and we end up over-allocating beyond the total pages determined by the GC trigger.
        let available_pages = total_pages.saturating_sub(reserved_pages);
        trace!(
            "Total pages = {}, reserved pages = {}, available pages = {}",
            total_pages,
            reserved_pages,
            available_pages,
        );
        available_pages
    }

    /// Get the number of pages that are reserved for collection. By default, we return 0.
    /// Copying plans need to override this and calculate the pages required to complete
    /// a copying GC.
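    ///
    /// For instance, a semispace-style copying plan needs enough headroom to evacuate all live
    /// pages, so a plausible override is the following sketch (`self.tospace()` is an assumption
    /// borrowed from the `SemiSpace` plan):
    /// ```ignore
    /// fn get_collection_reserved_pages(&self) -> usize {
    ///     self.tospace().reserved_pages()
    /// }
    /// ```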
    fn get_collection_reserved_pages(&self) -> usize {
        0
    }

    /// Get the number of pages that are used.
    fn get_used_pages(&self) -> usize;

    /// Get the number of pages that are NOT used. This is different from available pages:
    /// free pages are unused, but some of them may have been reserved for some reason.
    fn get_free_pages(&self) -> usize {
        let total_pages = self.get_total_pages();
        let used_pages = self.get_used_pages();

        // It is possible that the used pages are larger than the total pages, so we use saturating
        // subtraction. See the comments in `get_available_pages`.
        total_pages.saturating_sub(used_pages)
    }

    /// Return whether the last GC was an exhaustive attempt to collect the heap.
    /// For example, for generational GCs, a minor collection is not an exhaustive collection.
    /// For example, for Immix, a fast collection (no defragmentation) is not an exhaustive collection.
    fn last_collection_was_exhaustive(&self) -> bool {
        true
    }

    /// Return whether the current GC may move any object. The VM binding can make use of this
    /// information and choose to or not to update some data structures that record the addresses
    /// of objects.
    ///
    /// This function is callable during a GC. From the VM binding's point of view, the information
    /// of whether the current GC moves objects or not is available since `Collection::stop_mutators`
    /// is called, and remains available until (but not including) `resume_mutators`, at which time
    /// the current GC has just finished.
    fn current_gc_may_move_object(&self) -> bool;

    /// Called when an object is first reached by a sanity GC. At this point, the object is known
    /// to be reachable in the current GC, and all the GC work has been done for the object (such as
    /// tracing and releasing). A plan can implement this to use plan-specific semantics to check
    /// if the object is sane. Return true if the object is considered valid by the plan.
    fn sanity_check_object(&self, _object: ObjectReference) -> bool {
        true
    }

    /// Call `space.verify_side_metadata_sanity` for all spaces in this plan.
    fn verify_side_metadata_sanity(&self) {
        let mut side_metadata_sanity_checker = SideMetadataSanity::new();
        self.for_each_space(&mut |space| {
            space.verify_side_metadata_sanity(&mut side_metadata_sanity_checker);
        })
    }
}

impl_downcast!(Plan assoc VM);

/**
BasePlan should contain all plan-related state and functions that are _fundamental_ to _all_
plans. These include VM-specific (but not plan-specific) features such as a code space or VM
space, which are fundamental to all plans for a given VM. Features that are common to _many_
(but not intrinsically _all_) plans should instead be included in CommonPlan.
*/
#[derive(HasSpaces, PlanTraceObject)]
pub struct BasePlan<VM: VMBinding> {
    pub(crate) global_state: Arc<GlobalState>,
    pub options: Arc<Options>,
    pub gc_trigger: Arc<GCTrigger<VM>>,
    pub scheduler: Arc<GCWorkScheduler<VM>>,

    // Spaces in base plan
    #[cfg(feature = "code_space")]
    #[space]
    pub code_space: ImmortalSpace<VM>,
    #[cfg(feature = "code_space")]
    #[space]
    pub code_lo_space: ImmortalSpace<VM>,
    #[cfg(feature = "ro_space")]
    #[space]
    pub ro_space: ImmortalSpace<VM>,

    /// A VM space is a space allocated and populated by the VM. Currently it is used by JikesRVM
    /// for the boot image.
    ///
    /// If the VM space is present, it has some special interaction with the
    /// `memory_manager::is_mmtk_object` and the `memory_manager::is_in_mmtk_spaces` functions.
    ///
    /// - The functions `is_mmtk_object` and `find_object_from_internal_pointer` require
    ///   the valid object (VO) bit side metadata to identify objects.
    ///   If the binding maintains the VO bit for objects in VM spaces, those functions will work accordingly.
    ///   Otherwise, calling them is undefined behavior.
    ///
    /// - The `is_in_mmtk_spaces` function currently returns `true` if the given object reference is in
    ///   the VM space.
    #[cfg(feature = "vm_space")]
    #[space]
    pub vm_space: VMSpace<VM>,
}

/// Args needed for creating any plan. This includes a set of contexts shared across plans, from
/// the MMTk instance or global state. This is passed to each plan's constructor.
pub struct CreateGeneralPlanArgs<'a, VM: VMBinding> {
    pub vm_map: &'static dyn VMMap,
    pub mmapper: &'static dyn Mmapper,
    pub options: Arc<Options>,
    pub state: Arc<GlobalState>,
    pub gc_trigger: Arc<crate::util::heap::gc_trigger::GCTrigger<VM>>,
    pub scheduler: Arc<GCWorkScheduler<VM>>,
    pub stats: &'a Stats,
    pub heap: &'a mut HeapMeta,
}

/// Args needed for creating a specific plan. This includes plan-specific args, such as the plan
/// constraints and the plan's global side metadata specs. This is created in each plan's constructor,
/// and will be passed to `CommonPlan` or `BasePlan`. You can also create `PlanCreateSpaceArgs` from
/// this type, and use that to create spaces.
pub struct CreateSpecificPlanArgs<'a, VM: VMBinding> {
    pub global_args: CreateGeneralPlanArgs<'a, VM>,
    pub constraints: &'static PlanConstraints,
    pub global_side_metadata_specs: Vec<SideMetadataSpec>,
}

impl<VM: VMBinding> CreateSpecificPlanArgs<'_, VM> {
    /// Get a `PlanCreateSpaceArgs` that can be used to create a space.
    pub fn _get_space_args(
        &mut self,
        name: &'static str,
        zeroed: bool,
        permission_exec: bool,
        unlog_allocated_object: bool,
        unlog_traced_object: bool,
        vmrequest: VMRequest,
    ) -> PlanCreateSpaceArgs<'_, VM> {
        PlanCreateSpaceArgs {
            name,
            zeroed,
            permission_exec,
            vmrequest,
            unlog_allocated_object,
            unlog_traced_object,
            global_side_metadata_specs: self.global_side_metadata_specs.clone(),
            vm_map: self.global_args.vm_map,
            mmapper: self.global_args.mmapper,
            heap: self.global_args.heap,
            constraints: self.constraints,
            gc_trigger: self.global_args.gc_trigger.clone(),
            scheduler: self.global_args.scheduler.clone(),
            options: self.global_args.options.clone(),
            global_state: self.global_args.state.clone(),
        }
    }

    // The following are some convenience methods for common presets.
    // This is not an exhaustive list -- these are just the common presets used by most plans.

    /// Get a preset for a nursery space (where young objects are located).
    pub fn get_nursery_space_args(
        &mut self,
        name: &'static str,
        zeroed: bool,
        permission_exec: bool,
        vmrequest: VMRequest,
    ) -> PlanCreateSpaceArgs<'_, VM> {
        // Objects are allocated as young, and when traced, they stay young. If they are copied out of
        // the nursery space, they will be moved to a mature space, and log bits will be set in that
        // case by the mature space.
        self._get_space_args(name, zeroed, permission_exec, false, false, vmrequest)
    }

    /// Get a preset for a mature space (where mature objects are located).
    pub fn get_mature_space_args(
        &mut self,
        name: &'static str,
        zeroed: bool,
        permission_exec: bool,
        vmrequest: VMRequest,
    ) -> PlanCreateSpaceArgs<'_, VM> {
        // Objects are allocated as mature (pre-tenured), and when traced, they stay mature.
        // If an object gets copied into a mature space, the object is also mature.
        self._get_space_args(name, zeroed, permission_exec, true, true, vmrequest)
    }

    /// Get a preset for a mixed-age space (where both young and mature objects are located).
    pub fn get_mixed_age_space_args(
        &mut self,
        name: &'static str,
        zeroed: bool,
        permission_exec: bool,
        vmrequest: VMRequest,
    ) -> PlanCreateSpaceArgs<'_, VM> {
        // Objects are allocated as young, and when traced, they become mature objects.
        self._get_space_args(name, zeroed, permission_exec, false, true, vmrequest)
    }

    /// Get a preset for spaces in a non-generational plan.
    pub fn get_normal_space_args(
        &mut self,
        name: &'static str,
        zeroed: bool,
        permission_exec: bool,
        vmrequest: VMRequest,
    ) -> PlanCreateSpaceArgs<'_, VM> {
        // Non-generational plan: we do not use any of the flags about log bits.
        self._get_space_args(name, zeroed, permission_exec, false, false, vmrequest)
    }

    /// Get a preset for spaces in [`crate::plan::global::CommonPlan`].
    /// Spaces like the LOS, which may include both young and mature objects, should not use this method.
    pub fn get_common_space_args(
        &mut self,
        generational: bool,
        name: &'static str,
    ) -> PlanCreateSpaceArgs<'_, VM> {
        self.get_base_space_args(
            generational,
            name,
            false, // Common spaces are not executable.
        )
    }

    /// Get a preset for spaces in [`crate::plan::global::BasePlan`].
    pub fn get_base_space_args(
        &mut self,
        generational: bool,
        name: &'static str,
        permission_exec: bool,
    ) -> PlanCreateSpaceArgs<'_, VM> {
        if generational {
            // In generational plans, common/base spaces behave like a mature space:
            // * the objects in these spaces are not traced in a nursery GC
            // * the log bits for the objects are maintained exactly the same as in a mature space.
            // Thus we consider them as mature spaces.
            self.get_mature_space_args(name, true, permission_exec, VMRequest::discontiguous())
        } else {
            self.get_normal_space_args(name, true, permission_exec, VMRequest::discontiguous())
        }
    }
}

impl<VM: VMBinding> BasePlan<VM> {
    #[allow(unused_mut)] // 'args' only needs to be mutable for certain features
    pub fn new(mut args: CreateSpecificPlanArgs<VM>) -> BasePlan<VM> {
        let _generational = args.constraints.generational;
        BasePlan {
            #[cfg(feature = "code_space")]
            code_space: ImmortalSpace::new(args.get_base_space_args(
                _generational,
                "code_space",
                true,
            )),
            #[cfg(feature = "code_space")]
            code_lo_space: ImmortalSpace::new(args.get_base_space_args(
                _generational,
                "code_lo_space",
                true,
            )),
            #[cfg(feature = "ro_space")]
            ro_space: ImmortalSpace::new(args.get_base_space_args(
                _generational,
                "ro_space",
                false,
            )),
            #[cfg(feature = "vm_space")]
            vm_space: VMSpace::new(args.get_base_space_args(
                _generational,
                "vm_space",
                false, // it doesn't matter -- we are not mmapping for the VM space.
            )),

            global_state: args.global_args.state.clone(),
            gc_trigger: args.global_args.gc_trigger,
            options: args.global_args.options,
            scheduler: args.global_args.scheduler,
        }
    }

    pub fn get_used_pages(&self) -> usize {
        // Depending on which base spaces we use, `pages` may be unchanged.
        #[allow(unused_mut)]
        let mut pages = 0;

        #[cfg(feature = "code_space")]
        {
            pages += self.code_space.reserved_pages();
            pages += self.code_lo_space.reserved_pages();
        }
        #[cfg(feature = "ro_space")]
        {
            pages += self.ro_space.reserved_pages();
        }

        // If we need to count malloc'd size as part of our heap, we add it here.
        #[cfg(feature = "malloc_counted_size")]
        {
            pages += self.global_state.get_malloc_bytes_in_pages();
        }

        // The VM space may be used as an immutable boot image, in which case, we should not count
        // it as part of the heap size.
        pages
    }

    pub fn prepare(&mut self, _tls: VMWorkerThread, _full_heap: bool) {
        #[cfg(feature = "code_space")]
        self.code_space.prepare();
        #[cfg(feature = "code_space")]
        self.code_lo_space.prepare();
        #[cfg(feature = "ro_space")]
        self.ro_space.prepare();
        #[cfg(feature = "vm_space")]
        self.vm_space.prepare();
    }

    pub fn release(&mut self, _tls: VMWorkerThread, _full_heap: bool) {
        #[cfg(feature = "code_space")]
        self.code_space.release();
        #[cfg(feature = "code_space")]
        self.code_lo_space.release();
        #[cfg(feature = "ro_space")]
        self.ro_space.release();
        #[cfg(feature = "vm_space")]
        self.vm_space.release();
    }

    pub fn clear_side_log_bits(&self) {
        #[cfg(feature = "code_space")]
        self.code_space.clear_side_log_bits();
        #[cfg(feature = "code_space")]
        self.code_lo_space.clear_side_log_bits();
        #[cfg(feature = "ro_space")]
        self.ro_space.clear_side_log_bits();
        #[cfg(feature = "vm_space")]
        self.vm_space.clear_side_log_bits();
    }

    pub fn set_side_log_bits(&self) {
        #[cfg(feature = "code_space")]
        self.code_space.set_side_log_bits();
        #[cfg(feature = "code_space")]
        self.code_lo_space.set_side_log_bits();
        #[cfg(feature = "ro_space")]
        self.ro_space.set_side_log_bits();
        #[cfg(feature = "vm_space")]
        self.vm_space.set_side_log_bits();
    }

    pub fn end_of_gc(&mut self, _tls: VMWorkerThread) {
        // Do nothing here. None of the spaces needs end_of_gc.
    }

    pub(crate) fn collection_required<P: Plan>(&self, plan: &P, space_full: bool) -> bool {
        let stress_force_gc =
            crate::util::heap::gc_trigger::GCTrigger::<VM>::should_do_stress_gc_inner(
                &self.global_state,
                &self.options,
            );
        if stress_force_gc {
            debug!(
                "Stress GC: allocation_bytes = {}, stress_factor = {}",
                self.global_state.allocation_bytes.load(Ordering::Relaxed),
                *self.options.stress_factor
            );
            debug!("Doing stress GC");
            self.global_state
                .allocation_bytes
                .store(0, Ordering::SeqCst);
        }

        debug!(
            "self.get_reserved_pages()={}, self.get_total_pages()={}",
            plan.get_reserved_pages(),
            plan.get_total_pages()
        );
        // Check if we reserved more pages (including the collection copy reserve)
        // than the heap's total pages. In that case, we will have to do a GC.
        let heap_full = plan.base().gc_trigger.is_heap_full();

        space_full || stress_force_gc || heap_full
    }
}

cfg_if::cfg_if! {
    // Use immortal or mark-sweep as the non-moving space if the corresponding features are enabled.
    // Otherwise use Immix.
    if #[cfg(feature = "immortal_as_nonmoving")] {
        pub type NonMovingSpace<VM> = crate::policy::immortalspace::ImmortalSpace<VM>;
    } else if #[cfg(feature = "marksweep_as_nonmoving")] {
        pub type NonMovingSpace<VM> = crate::policy::marksweepspace::native_ms::MarkSweepSpace<VM>;
    } else {
        pub type NonMovingSpace<VM> = crate::policy::immix::ImmixSpace<VM>;
    }
}

/**
CommonPlan is for representing state and features used by _many_ plans, but that are not
fundamental to _all_ plans. Examples include the Large Object Space and an Immortal space.
Features that are fundamental to _all_ plans must be included in BasePlan.
*/
#[derive(HasSpaces, PlanTraceObject)]
pub struct CommonPlan<VM: VMBinding> {
    #[space]
    pub immortal: ImmortalSpace<VM>,
    #[space]
    pub los: LargeObjectSpace<VM>,
    #[space]
    #[cfg_attr(
        not(any(feature = "immortal_as_nonmoving", feature = "marksweep_as_nonmoving")),
        post_scan
    )] // Immix space needs post_scan
    pub nonmoving: NonMovingSpace<VM>,
    #[parent]
    pub base: BasePlan<VM>,
}

impl<VM: VMBinding> CommonPlan<VM> {
    pub fn new(mut args: CreateSpecificPlanArgs<VM>) -> CommonPlan<VM> {
        let needs_log_bit = args.constraints.needs_log_bit;
        let generational = args.constraints.generational;
        CommonPlan {
            immortal: ImmortalSpace::new(args.get_common_space_args(generational, "immortal")),
            los: LargeObjectSpace::new(
                // The LOS is a bit special, as it is a mixed-age space. It has a logical nursery.
                if generational {
                    args.get_mixed_age_space_args("los", true, false, VMRequest::discontiguous())
                } else {
                    args.get_normal_space_args("los", true, false, VMRequest::discontiguous())
                },
                false,
                needs_log_bit,
            ),
            nonmoving: Self::new_nonmoving_space(&mut args),
            base: BasePlan::new(args),
        }
    }

    pub fn get_used_pages(&self) -> usize {
        self.immortal.reserved_pages()
            + self.los.reserved_pages()
            + self.nonmoving.reserved_pages()
            + self.base.get_used_pages()
    }

    pub fn prepare(&mut self, tls: VMWorkerThread, full_heap: bool) {
        self.immortal.prepare();
        self.los.prepare(full_heap);
        self.prepare_nonmoving_space(full_heap);
        self.base.prepare(tls, full_heap)
    }

    pub fn release(&mut self, tls: VMWorkerThread, full_heap: bool) {
        self.immortal.release();
        self.los.release(full_heap);
        self.release_nonmoving_space(full_heap);
        self.base.release(tls, full_heap)
    }

    pub(crate) fn schedule_unlog_bits_op(&mut self, unlog_bits_op: UnlogBitsOperation) {
        if VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.is_on_side() {
            // SAFETY: The `CommonPlan` reference is always valid within this collection cycle.
            let common_plan = unsafe { &*(self as *const CommonPlan<VM>) };

            match unlog_bits_op {
                UnlogBitsOperation::NoOp => {}
                UnlogBitsOperation::BulkSet => {
                    self.base.scheduler.work_buckets[WorkBucketStage::Prepare]
                        .add(SetCommonPlanUnlogBits { common_plan });
                }
                UnlogBitsOperation::BulkClear => {
                    self.base.scheduler.work_buckets[WorkBucketStage::Release]
                        .add(ClearCommonPlanUnlogBits { common_plan });
                }
            }
        }
    }

    pub fn clear_side_log_bits(&self) {
        self.immortal.clear_side_log_bits();
        self.los.clear_side_log_bits();
        self.base.clear_side_log_bits();
    }

    pub fn set_side_log_bits(&self) {
        self.immortal.set_side_log_bits();
        self.los.set_side_log_bits();
        self.base.set_side_log_bits();
    }

    pub fn end_of_gc(&mut self, tls: VMWorkerThread) {
        self.end_of_gc_nonmoving_space();
        self.base.end_of_gc(tls);
    }

    pub fn get_immortal(&self) -> &ImmortalSpace<VM> {
        &self.immortal
    }

    pub fn get_los(&self) -> &LargeObjectSpace<VM> {
        &self.los
    }

    pub fn get_nonmoving(&self) -> &NonMovingSpace<VM> {
        &self.nonmoving
    }

    fn new_nonmoving_space(args: &mut CreateSpecificPlanArgs<VM>) -> NonMovingSpace<VM> {
        let space_args = args.get_common_space_args(args.constraints.generational, "nonmoving");
        cfg_if::cfg_if! {
            if #[cfg(any(feature = "immortal_as_nonmoving", feature = "marksweep_as_nonmoving"))] {
                NonMovingSpace::new(space_args)
            } else {
                // Immix requires extra args.
                NonMovingSpace::new(
                    space_args,
                    crate::policy::immix::ImmixSpaceArgs {
                        mixed_age: false,
                        never_move_objects: true,
                    },
                )
            }
        }
    }

    fn prepare_nonmoving_space(&mut self, _full_heap: bool) {
        cfg_if::cfg_if! {
            if #[cfg(feature = "immortal_as_nonmoving")] {
                self.nonmoving.prepare();
            } else if #[cfg(feature = "marksweep_as_nonmoving")] {
                self.nonmoving.prepare(_full_heap);
            } else {
                self.nonmoving.prepare(_full_heap, None, UnlogBitsOperation::NoOp);
            }
        }
    }

    fn release_nonmoving_space(&mut self, _full_heap: bool) {
        cfg_if::cfg_if! {
            if #[cfg(feature = "immortal_as_nonmoving")] {
                self.nonmoving.release();
            } else if #[cfg(feature = "marksweep_as_nonmoving")] {
                self.nonmoving.release(_full_heap);
            } else {
                self.nonmoving.release(_full_heap, UnlogBitsOperation::NoOp);
            }
        }
    }

    fn end_of_gc_nonmoving_space(&mut self) {
        cfg_if::cfg_if! {
            if #[cfg(feature = "immortal_as_nonmoving")] {
                // Nothing we need to do for immortal space.
            } else if #[cfg(feature = "marksweep_as_nonmoving")] {
                self.nonmoving.end_of_gc();
            } else {
                self.nonmoving.end_of_gc();
            }
        }
    }
}

use crate::policy::gc_work::TraceKind;
use crate::vm::VMBinding;

/// A trait for anything that contains spaces.
/// Examples include concrete plans as well as `Gen`, `CommonPlan` and `BasePlan`.
/// All plans must implement this trait.
///
/// This trait provides methods for enumerating spaces in a struct, including spaces in nested
/// structs.
///
/// This trait can be implemented automatically by adding the `#[derive(HasSpaces)]` attribute to a
/// struct. It uses the derive macro defined in the `mmtk-macros` crate.
///
/// This trait visits spaces as `dyn`, so it should only be used when performance is not critical.
/// For performance-critical methods that visit spaces in a plan, such as `trace_object`, it is
/// recommended to define a trait (such as `PlanTraceObject`) for concrete plans to implement, and
/// implement (by hand or automatically) the method without `dyn`.
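///
/// For example, the derive macro is used like the following sketch. The struct `MyPlan` and its
/// fields are hypothetical; `BasePlan` and `CommonPlan` in this file use the same attributes.
/// ```ignore
/// #[derive(HasSpaces)]
/// pub struct MyPlan<VM: VMBinding> {
///     // Each `#[space]` field is visited by `for_each_space`.
///     #[space]
///     immortal: ImmortalSpace<VM>,
///     // `#[parent]` marks a nested struct whose spaces are visited as well.
///     #[parent]
///     common: CommonPlan<VM>,
/// }
/// ```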
pub trait HasSpaces {
    /// The type of the VM.
    type VM: VMBinding;

    /// Visit each space field immutably.
    ///
    /// If `Self` contains nested fields that contain more spaces, this method shall visit spaces
    /// in the outer struct first.
    fn for_each_space(&self, func: &mut dyn FnMut(&dyn Space<Self::VM>));

    /// Visit each space field mutably.
    ///
    /// If `Self` contains nested fields that contain more spaces, this method shall visit spaces
    /// in the outer struct first.
    fn for_each_space_mut(&mut self, func: &mut dyn FnMut(&mut dyn Space<Self::VM>));
}

/// A plan that uses `PlanProcessEdges` needs to provide an implementation for this trait.
/// Generally a plan does not need to manually implement this trait. Instead, we provide
/// a procedural macro that helps generate an implementation. Please check `macros/trace_object`.
///
/// A plan could also manually implement this trait. For the sake of performance, the implementation
/// of this trait should mark methods as `#[inline(always)]`.
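///
/// For example, the derive macro is used like the following sketch. The struct `MyPlan` and its
/// fields are hypothetical; this mirrors how `CommonPlan` in this file annotates its spaces.
/// ```ignore
/// #[derive(HasSpaces, PlanTraceObject)]
/// pub struct MyPlan<VM: VMBinding> {
///     // `#[space]` fields are tried in turn by the generated `trace_object`.
///     // `#[post_scan]` additionally calls the policy's post-scan hook for
///     // objects in this space (e.g. to mark lines in Immix).
///     #[space]
///     #[post_scan]
///     immix: ImmixSpace<VM>,
///     // Objects not found in the spaces above are delegated to the nested plan.
///     #[parent]
///     common: CommonPlan<VM>,
/// }
/// ```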
pub trait PlanTraceObject<VM: VMBinding> {
    /// Trace objects in the plan. Generally one needs to figure out
    /// which space an object resides in, and invoke the corresponding policy's
    /// trace object method.
    ///
    /// Arguments:
    /// * `queue`: the object queue of the current transitive closure.
    /// * `object`: the object to trace.
    /// * `worker`: the GC worker that is tracing this object.
    fn trace_object<Q: ObjectQueue, const KIND: TraceKind>(
        &self,
        queue: &mut Q,
        object: ObjectReference,
        worker: &mut GCWorker<VM>,
    ) -> ObjectReference;

    /// Post-scan objects in the plan. Each object is scanned by `VM::VMScanning::scan_object()`, and this function
    /// will be called after `VM::VMScanning::scan_object()` as a hook to invoke possible policy post-scan methods.
    /// If a plan does not have any policy that needs post-scan, this method can be implemented as empty.
    /// If a plan has a policy that has some policy-specific behaviors for scanning (e.g. mark lines in Immix),
    /// this method should also invoke those policy-specific methods for objects in that space.
    fn post_scan_object(&self, object: ObjectReference);

    /// Whether objects in this plan may move. If any of the spaces used by the plan may move objects, this should
    /// return true.
    fn may_move_objects<const KIND: TraceKind>() -> bool;
}

use enum_map::Enum;
/// Allocation semantics that MMTk provides.
/// Each allocation request requires a desired semantic for the object to allocate.
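///
/// For example, a binding passes a semantic on every allocation request. The following sketch
/// uses the public `memory_manager` API; `mutator`, `size`, `align` and `offset` are placeholders.
/// ```ignore
/// // Allocate a large object directly into the large object space.
/// let addr = memory_manager::alloc(mutator, size, align, offset, AllocationSemantics::Los);
/// ```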
#[repr(i32)]
#[derive(Clone, Copy, Debug, Enum, PartialEq, Eq)]
pub enum AllocationSemantics {
    /// The default semantic. This means there is no specific requirement for the allocation.
    /// The actual semantic of the default will depend on the GC plan in use.
    Default = 0,
    /// Immortal objects will not be reclaimed. MMTk still traces immortal objects, but will not
    /// reclaim the objects even if they are dead.
    Immortal = 1,
    /// Large objects. It is usually desirable to allocate large objects specially. Large objects
    /// are allocated with page granularity and will not be moved.
    /// Each plan provides `max_non_los_default_alloc_bytes` (see [`crate::plan::PlanConstraints`]),
    /// which defines a threshold for objects that can be allocated with the default semantic. Any object
    /// that is larger than the threshold must be allocated with the `Los` semantic.
    /// This semantic may get removed, in which case MMTk would transparently allocate large objects
    /// into the large object space.
    Los = 2,
    /// Code objects have execution permission.
    /// Note that this is a placeholder for now. Currently all the memory MMTk allocates has execution permission.
    Code = 3,
    /// Read-only objects cannot be mutated once they are initialized.
    /// Note that this is a placeholder for now. It does not provide read-only semantics.
    ReadOnly = 4,
    /// Los + Code.
    LargeCode = 5,
    /// Non-moving objects will not be moved by GC.
    NonMoving = 6,
}
958}