// mmtk/plan/global.rs
1//! The global part of a plan implementation.
2
3use super::PlanConstraints;
4use crate::global_state::GlobalState;
5use crate::mmtk::MMTK;
6use crate::plan::gc_work::{ClearCommonPlanUnlogBits, SetCommonPlanUnlogBits};
7use crate::plan::tracing::ObjectQueue;
8use crate::plan::Mutator;
9use crate::policy::immortalspace::ImmortalSpace;
10use crate::policy::largeobjectspace::LargeObjectSpace;
11use crate::policy::space::{PlanCreateSpaceArgs, Space};
12#[cfg(feature = "vm_space")]
13use crate::policy::vmspace::VMSpace;
14use crate::scheduler::*;
15use crate::util::alloc::allocators::AllocatorSelector;
16use crate::util::copy::{CopyConfig, GCWorkerCopyContext};
17use crate::util::heap::gc_trigger::GCTrigger;
18use crate::util::heap::gc_trigger::SpaceStats;
19use crate::util::heap::layout::Mmapper;
20use crate::util::heap::layout::VMMap;
21use crate::util::heap::HeapMeta;
22use crate::util::heap::VMRequest;
23use crate::util::metadata::log_bit::UnlogBitsOperation;
24use crate::util::metadata::side_metadata::SideMetadataSanity;
25use crate::util::metadata::side_metadata::SideMetadataSpec;
26use crate::util::options::Options;
27use crate::util::options::PlanSelector;
28use crate::util::statistics::stats::Stats;
29use crate::util::{conversions, ObjectReference};
30use crate::util::{VMMutatorThread, VMWorkerThread};
31use crate::vm::*;
32use downcast_rs::Downcast;
33use enum_map::EnumMap;
34use std::sync::atomic::Ordering;
35use std::sync::Arc;
36
37use mmtk_macros::{HasSpaces, PlanTraceObject};
38
39pub fn create_mutator<VM: VMBinding>(
40 tls: VMMutatorThread,
41 mmtk: &'static MMTK<VM>,
42) -> Box<Mutator<VM>> {
43 Box::new(match *mmtk.options.plan {
44 PlanSelector::NoGC => crate::plan::nogc::mutator::create_nogc_mutator(tls, mmtk),
45 PlanSelector::SemiSpace => crate::plan::semispace::mutator::create_ss_mutator(tls, mmtk),
46 PlanSelector::GenCopy => {
47 crate::plan::generational::copying::mutator::create_gencopy_mutator(tls, mmtk)
48 }
49 PlanSelector::GenImmix => {
50 crate::plan::generational::immix::mutator::create_genimmix_mutator(tls, mmtk)
51 }
52 PlanSelector::MarkSweep => crate::plan::marksweep::mutator::create_ms_mutator(tls, mmtk),
53 PlanSelector::Immix => crate::plan::immix::mutator::create_immix_mutator(tls, mmtk),
54 PlanSelector::PageProtect => {
55 crate::plan::pageprotect::mutator::create_pp_mutator(tls, mmtk)
56 }
57 PlanSelector::MarkCompact => {
58 crate::plan::markcompact::mutator::create_markcompact_mutator(tls, mmtk)
59 }
60 PlanSelector::StickyImmix => {
61 crate::plan::sticky::immix::mutator::create_stickyimmix_mutator(tls, mmtk)
62 }
63 PlanSelector::ConcurrentImmix => {
64 crate::plan::concurrent::immix::mutator::create_concurrent_immix_mutator(tls, mmtk)
65 }
66 PlanSelector::Compressor => {
67 crate::plan::compressor::mutator::create_compressor_mutator(tls, mmtk)
68 }
69 })
70}
71
72pub fn create_plan<VM: VMBinding>(
73 plan: PlanSelector,
74 args: CreateGeneralPlanArgs<VM>,
75) -> Box<dyn Plan<VM = VM>> {
76 match plan {
77 PlanSelector::NoGC => {
78 Box::new(crate::plan::nogc::NoGC::new(args)) as Box<dyn Plan<VM = VM>>
79 }
80 PlanSelector::SemiSpace => {
81 Box::new(crate::plan::semispace::SemiSpace::new(args)) as Box<dyn Plan<VM = VM>>
82 }
83 PlanSelector::GenCopy => Box::new(crate::plan::generational::copying::GenCopy::new(args))
84 as Box<dyn Plan<VM = VM>>,
85 PlanSelector::GenImmix => Box::new(crate::plan::generational::immix::GenImmix::new(args))
86 as Box<dyn Plan<VM = VM>>,
87 PlanSelector::MarkSweep => {
88 Box::new(crate::plan::marksweep::MarkSweep::new(args)) as Box<dyn Plan<VM = VM>>
89 }
90 PlanSelector::Immix => {
91 Box::new(crate::plan::immix::Immix::new(args)) as Box<dyn Plan<VM = VM>>
92 }
93 PlanSelector::PageProtect => {
94 Box::new(crate::plan::pageprotect::PageProtect::new(args)) as Box<dyn Plan<VM = VM>>
95 }
96 PlanSelector::MarkCompact => {
97 Box::new(crate::plan::markcompact::MarkCompact::new(args)) as Box<dyn Plan<VM = VM>>
98 }
99 PlanSelector::StickyImmix => {
100 Box::new(crate::plan::sticky::immix::StickyImmix::new(args)) as Box<dyn Plan<VM = VM>>
101 }
102 PlanSelector::ConcurrentImmix => {
103 Box::new(crate::plan::concurrent::immix::ConcurrentImmix::new(args))
104 as Box<dyn Plan<VM = VM>>
105 }
106 PlanSelector::Compressor => {
107 Box::new(crate::plan::compressor::Compressor::new(args)) as Box<dyn Plan<VM = VM>>
108 }
109 }
110}
111
112/// Create thread local GC worker.
113pub fn create_gc_worker_context<VM: VMBinding>(
114 tls: VMWorkerThread,
115 mmtk: &'static MMTK<VM>,
116) -> GCWorkerCopyContext<VM> {
117 GCWorkerCopyContext::<VM>::new(tls, mmtk, mmtk.get_plan().create_copy_config())
118}
119
/// A plan describes the global core functionality for all memory management schemes.
/// All global MMTk plans should implement this trait.
///
/// The global instance defines and manages static resources
/// (such as memory and virtual memory resources).
///
/// Constructor:
///
/// For the constructor of a new plan, there are a few things the constructor _must_ do
/// (please check existing plans and see what they do in the constructor):
/// 1. Create a HeapMeta, and use this HeapMeta to initialize all the spaces.
/// 2. Create a vector of all the side metadata specs with `SideMetadataContext::new_global_specs()`,
///    the parameter is a vector of global side metadata specs that are specific to the plan.
/// 3. Initialize all the spaces the plan uses with the heap meta, and the global metadata specs vector.
/// 4. Invoke the `verify_side_metadata_sanity()` method of the plan.
///    It will create a `SideMetadataSanity` object, and invoke verify_side_metadata_sanity() for each space (or
///    invoke verify_side_metadata_sanity() in `CommonPlan`/`BasePlan` for the spaces in the common/base plan).
///
/// Methods in this trait:
///
/// Only methods that will be overridden by each specific plan should be included in this trait. The trait may
/// provide a default implementation, and each plan can override the implementation. For methods that won't be
/// overridden, we should implement those methods in BasePlan (or CommonPlan) and call them from there instead.
/// We should avoid having methods with the same name in both Plan and BasePlan, as this may confuse people, and
/// they may call a wrong method by mistake.
// TODO: Some methods that are not overridden can be moved from the trait to BasePlan.
pub trait Plan: 'static + HasSpaces + Sync + Downcast {
    /// Get the plan constraints for the plan.
    /// This returns a non-constant value. A constant value can be found in each plan's module if needed.
    fn constraints(&self) -> &'static PlanConstraints;

    /// Create a copy config for this plan. A copying GC plan MUST override this method,
    /// and provide a valid config.
    fn create_copy_config(&'static self) -> CopyConfig<Self::VM> {
        // Use the empty default copy config for non copying GC.
        CopyConfig::default()
    }

    /// Get an immutable reference to the base plan. `BasePlan` is included by all the MMTk GC plans.
    fn base(&self) -> &BasePlan<Self::VM>;

    /// Get a mutable reference to the base plan. `BasePlan` is included by all the MMTk GC plans.
    fn base_mut(&mut self) -> &mut BasePlan<Self::VM>;

    /// Schedule work for the upcoming GC.
    fn schedule_collection(&'static self, _scheduler: &GCWorkScheduler<Self::VM>);

    /// Get the common plan. CommonPlan is included by most of MMTk GC plans.
    ///
    /// # Panics
    ///
    /// The default implementation panics. Plans that include a `CommonPlan` must
    /// override this method.
    fn common(&self) -> &CommonPlan<Self::VM> {
        panic!("Common Plan not handled!")
    }

    /// Return a reference to `GenerationalPlan` to allow
    /// access methods specific to generational plans if the plan is a generational plan.
    /// Returns `None` (the default) for non-generational plans.
    fn generational(
        &self,
    ) -> Option<&dyn crate::plan::generational::global::GenerationalPlan<VM = Self::VM>> {
        None
    }

    /// Return a reference to `ConcurrentPlan` to allow
    /// access methods specific to concurrent plans if the plan is a concurrent plan.
    /// Returns `None` (the default) for non-concurrent plans.
    fn concurrent(
        &self,
    ) -> Option<&dyn crate::plan::concurrent::global::ConcurrentPlan<VM = Self::VM>> {
        None
    }

    /// Get the current run time options.
    fn options(&self) -> &Options {
        &self.base().options
    }

    /// Get the allocator mapping between [`crate::AllocationSemantics`] and [`crate::util::alloc::AllocatorSelector`].
    /// This defines what space this plan will allocate objects into for different semantics.
    fn get_allocator_mapping(&self) -> &'static EnumMap<AllocationSemantics, AllocatorSelector>;

    /// Called when all mutators are paused. This is called before prepare.
    fn notify_mutators_paused(&self, _scheduler: &GCWorkScheduler<Self::VM>) {}

    /// Prepare the plan before a GC. This is invoked in an initial step in the GC.
    /// This is invoked once per GC by one worker thread. `tls` is the worker thread that executes this method.
    fn prepare(&mut self, tls: VMWorkerThread);

    /// Prepare a worker for a GC. Each worker has its own prepare method. This hook is for plan-specific
    /// per-worker preparation. This method is invoked once per worker by the worker thread passed as the argument.
    fn prepare_worker(&self, _worker: &mut GCWorker<Self::VM>) {}

    /// Release the plan after transitive closure. A plan can implement this method to call each policy's release,
    /// or create any work packet that should be done in release.
    /// This is invoked once per GC by one worker thread. `tls` is the worker thread that executes this method.
    fn release(&mut self, tls: VMWorkerThread);

    /// Inform the plan about the end of a GC. It is guaranteed that there is no further work for this GC.
    /// This is invoked once per GC by one worker thread. `tls` is the worker thread that executes this method.
    // TODO: This is actually called at the end of a pause/STW, rather than the end of a GC. It should be renamed.
    fn end_of_gc(&mut self, _tls: VMWorkerThread);

    /// Notify the plan that an emergency collection will happen. The plan should try to free as much memory as possible.
    /// The default implementation will force a full heap collection for generational plans.
    fn notify_emergency_collection(&self) {
        if let Some(gen) = self.generational() {
            gen.force_full_heap_collection();
        }
    }

    /// Ask the plan if they would trigger a GC. If MMTk is in charge of triggering GCs, this method is called
    /// periodically during allocation. However, MMTk may delegate the GC triggering decision to the runtime,
    /// in which case, this method may not be called. This method returns true to trigger a collection.
    ///
    /// # Arguments
    /// * `space_full`: the allocation to a specific space failed, must recover pages within 'space'.
    /// * `space`: an option to indicate if there is a space that has failed in an allocation.
    fn collection_required(&self, space_full: bool, space: Option<SpaceStats<Self::VM>>) -> bool;

    // Note: The following methods are about page accounting. The default implementation should
    // work fine for non-copying plans. For copying plans, the plan should override any of these methods
    // if necessary.

    /// Get the number of pages that are reserved, including pages used by MMTk spaces, pages that
    /// will be used (e.g. for copying), and live pages allocated outside MMTk spaces as reported
    /// by the VM binding.
    fn get_reserved_pages(&self) -> usize {
        let used_pages = self.get_used_pages();
        let collection_reserve = self.get_collection_reserved_pages();
        let vm_live_bytes = <Self::VM as VMBinding>::VMCollection::vm_live_bytes();
        // Note that `vm_live_bytes` may not be the exact number of bytes in whole pages. The VM
        // binding is allowed to return an approximate value if it is expensive or impossible to
        // compute the exact number of pages occupied.
        let vm_live_pages = conversions::bytes_to_pages_up(vm_live_bytes);
        let total = used_pages + collection_reserve + vm_live_pages;

        trace!(
            "Reserved pages = {}, used pages: {}, collection reserve: {}, VM live pages: {}",
            total,
            used_pages,
            collection_reserve,
            vm_live_pages,
        );

        total
    }

    /// Get the total number of pages for the heap.
    fn get_total_pages(&self) -> usize {
        self.base()
            .gc_trigger
            .policy
            .get_current_heap_size_in_pages()
    }

    /// Get the number of pages that are still available for use. The available pages
    /// should always be positive or 0.
    fn get_available_pages(&self) -> usize {
        let reserved_pages = self.get_reserved_pages();
        let total_pages = self.get_total_pages();

        // It is possible that the reserved pages is larger than the total pages so we are doing
        // a saturating subtraction to make sure we return a non-negative number.
        // For example,
        // 1. our GC trigger checks if reserved pages is more than total pages.
        // 2. when the heap is almost full of live objects (such as in the case of an OOM) and we are doing a copying GC, it is possible
        //    the reserved pages is larger than total pages after the copying GC (the reserved pages after a GC
        //    may be larger than the reserved pages before a GC, as we may end up using more memory for thread local
        //    buffers for copy allocators).
        // 3. the binding disabled GC, and we end up over-allocating beyond the total pages determined by the GC trigger.
        let available_pages = total_pages.saturating_sub(reserved_pages);
        trace!(
            "Total pages = {}, reserved pages = {}, available pages = {}",
            total_pages,
            reserved_pages,
            available_pages,
        );
        available_pages
    }

    /// Get the number of pages that are reserved for collection. By default, we return 0.
    /// For copying plans, they need to override this and calculate required pages to complete
    /// a copying GC.
    fn get_collection_reserved_pages(&self) -> usize {
        0
    }

    /// Get the number of pages that are used.
    fn get_used_pages(&self) -> usize;

    /// Get the number of pages that are NOT used. This is clearly different from available pages.
    /// Free pages are unused, but some of them may have been reserved for some reason.
    fn get_free_pages(&self) -> usize {
        let total_pages = self.get_total_pages();
        let used_pages = self.get_used_pages();

        // It is possible that the used pages is larger than the total pages, so we use saturating
        // subtraction. See the comments in `get_available_pages`.
        total_pages.saturating_sub(used_pages)
    }

    /// Return whether last GC was an exhaustive attempt to collect the heap.
    /// For example, for generational GCs, minor collection is not an exhaustive collection.
    /// For example, for Immix, fast collection (no defragmentation) is not an exhaustive collection.
    fn last_collection_was_exhaustive(&self) -> bool {
        true
    }

    /// Return whether the current GC may move any object. The VM binding can make use of this
    /// information and choose to or not to update some data structures that record the addresses
    /// of objects.
    ///
    /// This function is callable during a GC. From the VM binding's point of view, the information
    /// of whether the current GC moves object or not is available since `Collection::stop_mutators`
    /// is called, and remains available until (but not including) `resume_mutators` at which time
    /// the current GC has just finished.
    fn current_gc_may_move_object(&self) -> bool;

    /// An object is firstly reached by a sanity GC. So the object is reachable
    /// in the current GC, and all the GC work has been done for the object (such as
    /// tracing and releasing). A plan can implement this to
    /// use plan specific semantics to check if the object is sane.
    /// Return true if the object is considered valid by the plan.
    fn sanity_check_object(&self, _object: ObjectReference) -> bool {
        true
    }

    /// Call `space.verify_side_metadata_sanity` for all spaces in this plan.
    fn verify_side_metadata_sanity(&self) {
        let mut side_metadata_sanity_checker = SideMetadataSanity::new();
        self.for_each_space(&mut |space| {
            space.verify_side_metadata_sanity(&mut side_metadata_sanity_checker);
        })
    }

    /// Call `space.initialize_sft` for all spaces in this plan, and notify the SFT map about the creation of each space.
    /// This method should only be called after 1. side metadata is initialized (as some SFT maps may use side metadata), 2. the plan is created in the heap and won't be moved,
    /// and 3. the side metadata sanity is initialized (otherwise we may try access side metadata and trigger sanity check before side metadata sanity is initialized)
    fn initialize_sft(&self) {
        // SAFETY: presumably sound because this runs once during initialization before
        // any concurrent access to the SFT map -- NOTE(review): confirm the caller
        // guarantees single-threaded initialization here.
        let sft_map: &mut dyn crate::policy::sft_map::SFTMap =
            unsafe { crate::mmtk::SFT_MAP.get_mut() }.as_mut();
        self.for_each_space(&mut |s| {
            sft_map.notify_space_creation(s.as_sft());
            s.initialize_sft(sft_map);
        });
    }
}
363
// Allow downcasting a `dyn Plan<VM = _>` trait object to a concrete plan type
// (`Downcast` is in `Plan`'s supertrait list).
impl_downcast!(Plan assoc VM);
365
366/**
367BasePlan should contain all plan-related state and functions that are _fundamental_ to _all_ plans. These include VM-specific (but not plan-specific) features such as a code space or vm space, which are fundamental to all plans for a given VM. Features that are common to _many_ (but not intrinsically _all_) plans should instead be included in CommonPlan.
368*/
369#[derive(HasSpaces, PlanTraceObject)]
370pub struct BasePlan<VM: VMBinding> {
371 pub(crate) global_state: Arc<GlobalState>,
372 pub options: Arc<Options>,
373 pub gc_trigger: Arc<GCTrigger<VM>>,
374 pub scheduler: Arc<GCWorkScheduler<VM>>,
375
376 // Spaces in base plan
377 #[cfg(feature = "code_space")]
378 #[space]
379 pub code_space: ImmortalSpace<VM>,
380 #[cfg(feature = "code_space")]
381 #[space]
382 pub code_lo_space: ImmortalSpace<VM>,
383 #[cfg(feature = "ro_space")]
384 #[space]
385 pub ro_space: ImmortalSpace<VM>,
386
387 /// A VM space is a space allocated and populated by the VM. Currently it is used by JikesRVM
388 /// for boot image.
389 ///
390 /// If VM space is present, it has some special interaction with the
391 /// `memory_manager::is_mmtk_object` and the `memory_manager::is_in_mmtk_spaces` functions.
392 ///
393 /// - The functions `is_mmtk_object` and `find_object_from_internal_pointer` require
394 /// the valid object (VO) bit side metadata to identify objects.
395 /// If the binding maintains the VO bit for objects in VM spaces, those functions will work accordingly.
396 /// Otherwise, calling them is undefined behavior.
397 ///
398 /// - The `is_in_mmtk_spaces` currently returns `true` if the given object reference is in
399 /// the VM space.
400 #[cfg(feature = "vm_space")]
401 #[space]
402 pub vm_space: VMSpace<VM>,
403}
404
/// Args needed for creating any plan. This includes a set of contexts from MMTK or global. This
/// is passed to each plan's constructor.
pub struct CreateGeneralPlanArgs<'a, VM: VMBinding> {
    /// The global virtual-memory map.
    pub vm_map: &'static dyn VMMap,
    /// The global mmapper.
    pub mmapper: &'static dyn Mmapper,
    /// The run-time options.
    pub options: Arc<Options>,
    /// Shared global state.
    pub state: Arc<GlobalState>,
    /// The GC trigger.
    pub gc_trigger: Arc<crate::util::heap::gc_trigger::GCTrigger<VM>>,
    /// The GC work scheduler.
    pub scheduler: Arc<GCWorkScheduler<VM>>,
    /// Statistics collection.
    pub stats: &'a Stats,
    /// Heap metadata, used when initializing spaces.
    pub heap: &'a mut HeapMeta,
}
417
/// Args needed for creating a specific plan. This includes plan-specific args, such as plan constraints
/// and their global side metadata specs. This is created in each plan's constructor, and will be passed
/// to `CommonPlan` or `BasePlan`. Also you can create `PlanCreateSpaceArgs` from this type, and use that
/// to create spaces.
pub struct CreateSpecificPlanArgs<'a, VM: VMBinding> {
    /// The general args shared by all plans.
    pub global_args: CreateGeneralPlanArgs<'a, VM>,
    /// The constraints of the concrete plan being created.
    pub constraints: &'static PlanConstraints,
    /// Global side metadata specs for this plan (cloned into each space's creation args).
    pub global_side_metadata_specs: Vec<SideMetadataSpec>,
}
427
impl<VM: VMBinding> CreateSpecificPlanArgs<'_, VM> {
    /// Get a PlanCreateSpaceArgs that can be used to create a space.
    ///
    /// The `unlog_allocated_object`/`unlog_traced_object` flags control how log bits
    /// are maintained for the space; the presets below choose them per space role.
    pub fn _get_space_args(
        &mut self,
        name: &'static str,
        zeroed: bool,
        permission_exec: bool,
        unlog_allocated_object: bool,
        unlog_traced_object: bool,
        vmrequest: VMRequest,
    ) -> PlanCreateSpaceArgs<'_, VM> {
        PlanCreateSpaceArgs {
            name,
            zeroed,
            permission_exec,
            vmrequest,
            unlog_allocated_object,
            unlog_traced_object,
            global_side_metadata_specs: self.global_side_metadata_specs.clone(),
            vm_map: self.global_args.vm_map,
            mmapper: self.global_args.mmapper,
            heap: self.global_args.heap,
            constraints: self.constraints,
            gc_trigger: self.global_args.gc_trigger.clone(),
            scheduler: self.global_args.scheduler.clone(),
            options: self.global_args.options.clone(),
            global_state: self.global_args.state.clone(),
        }
    }

    // The following are some convenience methods for common presets.
    // These are not an exhaustive list -- it is just common presets that are used by most plans.

    /// Get a preset for a nursery space (where young objects are located).
    pub fn get_nursery_space_args(
        &mut self,
        name: &'static str,
        zeroed: bool,
        permission_exec: bool,
        vmrequest: VMRequest,
    ) -> PlanCreateSpaceArgs<'_, VM> {
        // Objects are allocated as young, and when traced, they stay young. If they are copied out of the nursery space, they will be moved to a mature space,
        // and log bits will be set in that case by the mature space.
        self._get_space_args(name, zeroed, permission_exec, false, false, vmrequest)
    }

    /// Get a preset for a mature space (where mature objects are located).
    pub fn get_mature_space_args(
        &mut self,
        name: &'static str,
        zeroed: bool,
        permission_exec: bool,
        vmrequest: VMRequest,
    ) -> PlanCreateSpaceArgs<'_, VM> {
        // Objects are allocated as mature (pre-tenured), and when traced, they stay mature.
        // If an object gets copied into a mature space, the object is also mature.
        self._get_space_args(name, zeroed, permission_exec, true, true, vmrequest)
    }

    /// Get a preset for a mixed age space (where both young and mature objects are located).
    pub fn get_mixed_age_space_args(
        &mut self,
        name: &'static str,
        zeroed: bool,
        permission_exec: bool,
        vmrequest: VMRequest,
    ) -> PlanCreateSpaceArgs<'_, VM> {
        // Objects are allocated as young, and when traced, they become mature objects.
        self._get_space_args(name, zeroed, permission_exec, false, true, vmrequest)
    }

    /// Get a preset for spaces in a non-generational plan.
    pub fn get_normal_space_args(
        &mut self,
        name: &'static str,
        zeroed: bool,
        permission_exec: bool,
        vmrequest: VMRequest,
    ) -> PlanCreateSpaceArgs<'_, VM> {
        // Non generational plan: we do not use any of the flags about log bits.
        self._get_space_args(name, zeroed, permission_exec, false, false, vmrequest)
    }

    /// Get a preset for spaces in [`crate::plan::global::CommonPlan`].
    /// Spaces like LOS which may include both young and mature objects should not use this method.
    pub fn get_common_space_args(
        &mut self,
        generational: bool,
        name: &'static str,
    ) -> PlanCreateSpaceArgs<'_, VM> {
        self.get_base_space_args(
            generational,
            name,
            false, // Common spaces are not executable.
        )
    }

    /// Get a preset for spaces in [`crate::plan::global::BasePlan`].
    pub fn get_base_space_args(
        &mut self,
        generational: bool,
        name: &'static str,
        permission_exec: bool,
    ) -> PlanCreateSpaceArgs<'_, VM> {
        if generational {
            // In generational plans, common/base spaces behave like a mature space:
            // * the objects in these spaces are not traced in a nursery GC
            // * the log bits for the objects are maintained exactly the same as a mature space.
            // Thus we consider them as mature spaces.
            self.get_mature_space_args(name, true, permission_exec, VMRequest::discontiguous())
        } else {
            self.get_normal_space_args(name, true, permission_exec, VMRequest::discontiguous())
        }
    }
}
543
impl<VM: VMBinding> BasePlan<VM> {
    /// Create a `BasePlan`, constructing the feature-gated base spaces and taking
    /// ownership of the shared handles (state, trigger, options, scheduler) in `args`.
    #[allow(unused_mut)] // 'args' only needs to be mutable for certain features
    pub fn new(mut args: CreateSpecificPlanArgs<VM>) -> BasePlan<VM> {
        let _generational = args.constraints.generational;
        BasePlan {
            #[cfg(feature = "code_space")]
            code_space: ImmortalSpace::new(args.get_base_space_args(
                _generational,
                "code_space",
                true,
            )),
            #[cfg(feature = "code_space")]
            code_lo_space: ImmortalSpace::new(args.get_base_space_args(
                _generational,
                "code_lo_space",
                true,
            )),
            #[cfg(feature = "ro_space")]
            ro_space: ImmortalSpace::new(args.get_base_space_args(
                _generational,
                "ro_space",
                false,
            )),
            #[cfg(feature = "vm_space")]
            vm_space: VMSpace::new(args.get_base_space_args(
                _generational,
                "vm_space",
                false, // it doesn't matter -- we are not mmapping for VM space.
            )),

            global_state: args.global_args.state.clone(),
            gc_trigger: args.global_args.gc_trigger,
            options: args.global_args.options,
            scheduler: args.global_args.scheduler,
        }
    }

    /// Get the number of pages used by the base-plan spaces (plus malloc'd pages when
    /// the `malloc_counted_size` feature is enabled).
    pub fn get_used_pages(&self) -> usize {
        // Depends on what base spaces we use, pages may be unchanged.
        #[allow(unused_mut)]
        let mut pages = 0;

        #[cfg(feature = "code_space")]
        {
            pages += self.code_space.reserved_pages();
            pages += self.code_lo_space.reserved_pages();
        }
        #[cfg(feature = "ro_space")]
        {
            pages += self.ro_space.reserved_pages();
        }

        // If we need to count malloc'd size as part of our heap, we add it here.
        #[cfg(feature = "malloc_counted_size")]
        {
            pages += self.global_state.get_malloc_bytes_in_pages();
        }

        // The VM space may be used as an immutable boot image, in which case, we should not count
        // it as part of the heap size.
        pages
    }

    /// Prepare the base-plan spaces before a GC.
    pub fn prepare(&mut self, _tls: VMWorkerThread, _full_heap: bool) {
        #[cfg(feature = "code_space")]
        self.code_space.prepare();
        #[cfg(feature = "code_space")]
        self.code_lo_space.prepare();
        #[cfg(feature = "ro_space")]
        self.ro_space.prepare();
        #[cfg(feature = "vm_space")]
        self.vm_space.prepare();
    }

    /// Release the base-plan spaces after a GC.
    pub fn release(&mut self, _tls: VMWorkerThread, _full_heap: bool) {
        #[cfg(feature = "code_space")]
        self.code_space.release();
        #[cfg(feature = "code_space")]
        self.code_lo_space.release();
        #[cfg(feature = "ro_space")]
        self.ro_space.release();
        #[cfg(feature = "vm_space")]
        self.vm_space.release();
    }

    /// Clear the side log bits for all base-plan spaces.
    pub fn clear_side_log_bits(&self) {
        #[cfg(feature = "code_space")]
        self.code_space.clear_side_log_bits();
        #[cfg(feature = "code_space")]
        self.code_lo_space.clear_side_log_bits();
        #[cfg(feature = "ro_space")]
        self.ro_space.clear_side_log_bits();
        #[cfg(feature = "vm_space")]
        self.vm_space.clear_side_log_bits();
    }

    /// Set the side log bits for all base-plan spaces.
    pub fn set_side_log_bits(&self) {
        #[cfg(feature = "code_space")]
        self.code_space.set_side_log_bits();
        #[cfg(feature = "code_space")]
        self.code_lo_space.set_side_log_bits();
        #[cfg(feature = "ro_space")]
        self.ro_space.set_side_log_bits();
        #[cfg(feature = "vm_space")]
        self.vm_space.set_side_log_bits();
    }

    /// Inform the base plan about the end of a GC.
    pub fn end_of_gc(&mut self, _tls: VMWorkerThread) {
        // Do nothing here. None of the spaces needs end_of_gc.
    }

    /// Decide whether a GC should be triggered: a GC is required if the failing space
    /// is full, a stress GC is due, or the heap is full according to the GC trigger.
    pub(crate) fn collection_required<P: Plan>(&self, plan: &P, space_full: bool) -> bool {
        let stress_force_gc =
            crate::util::heap::gc_trigger::GCTrigger::<VM>::should_do_stress_gc_inner(
                &self.global_state,
                &self.options,
            );
        if stress_force_gc {
            debug!(
                "Stress GC: allocation_bytes = {}, stress_factor = {}",
                self.global_state.allocation_bytes.load(Ordering::Relaxed),
                *self.options.stress_factor
            );
            debug!("Doing stress GC");
            // Reset the allocation counter so the next stress GC is another
            // `stress_factor` bytes of allocation away.
            self.global_state
                .allocation_bytes
                .store(0, Ordering::SeqCst);
        }

        debug!(
            "self.get_reserved_pages()={}, self.get_total_pages()={}",
            plan.get_reserved_pages(),
            plan.get_total_pages()
        );
        // Check if we reserved more pages (including the collection copy reserve)
        // than the heap's total pages. In that case, we will have to do a GC.
        let heap_full = plan.base().gc_trigger.is_heap_full();

        space_full || stress_force_gc || heap_full
    }
}
686
cfg_if::cfg_if! {
    // Select the policy backing the non-moving space: immortal or mark-sweep if the
    // corresponding feature is enabled; otherwise a (non-moving) Immix space.
    if #[cfg(feature = "immortal_as_nonmoving")] {
        pub type NonMovingSpace<VM> = crate::policy::immortalspace::ImmortalSpace<VM>;
    } else if #[cfg(feature = "marksweep_as_nonmoving")] {
        pub type NonMovingSpace<VM> = crate::policy::marksweepspace::native_ms::MarkSweepSpace<VM>;
    } else {
        pub type NonMovingSpace<VM> = crate::policy::immix::ImmixSpace<VM>;
    }
}
697
698/**
699CommonPlan is for representing state and features used by _many_ plans, but that are not fundamental to _all_ plans. Examples include the Large Object Space and an Immortal space. Features that are fundamental to _all_ plans must be included in BasePlan.
700*/
701#[derive(HasSpaces, PlanTraceObject)]
702pub struct CommonPlan<VM: VMBinding> {
703 #[space]
704 pub immortal: ImmortalSpace<VM>,
705 #[space]
706 pub los: LargeObjectSpace<VM>,
707 #[space]
708 #[cfg_attr(
709 not(any(feature = "immortal_as_nonmoving", feature = "marksweep_as_nonmoving")),
710 post_scan
711 )] // Immix space needs post_scan
712 pub nonmoving: NonMovingSpace<VM>,
713 #[parent]
714 pub base: BasePlan<VM>,
715}
716
717impl<VM: VMBinding> CommonPlan<VM> {
718 pub fn new(mut args: CreateSpecificPlanArgs<VM>) -> CommonPlan<VM> {
719 let needs_log_bit = args.constraints.needs_log_bit;
720 let generational = args.constraints.generational;
721 CommonPlan {
722 immortal: ImmortalSpace::new(args.get_common_space_args(generational, "immortal")),
723 los: LargeObjectSpace::new(
724 // LOS is a bit special, as it is a mixed age space. It has a logical nursery.
725 if generational {
726 args.get_mixed_age_space_args("los", true, false, VMRequest::discontiguous())
727 } else {
728 args.get_normal_space_args("los", true, false, VMRequest::discontiguous())
729 },
730 false,
731 needs_log_bit,
732 ),
733 nonmoving: Self::new_nonmoving_space(&mut args),
734 base: BasePlan::new(args),
735 }
736 }
737
738 pub fn get_used_pages(&self) -> usize {
739 self.immortal.reserved_pages()
740 + self.los.reserved_pages()
741 + self.nonmoving.reserved_pages()
742 + self.base.get_used_pages()
743 }
744
    /// Prepare the common-plan spaces (and the base plan) before a GC.
    pub fn prepare(&mut self, tls: VMWorkerThread, full_heap: bool) {
        self.immortal.prepare();
        self.los.prepare(full_heap);
        self.prepare_nonmoving_space(full_heap);
        self.base.prepare(tls, full_heap)
    }
751
    /// Release the common-plan spaces (and the base plan) after a GC.
    pub fn release(&mut self, tls: VMWorkerThread, full_heap: bool) {
        self.immortal.release();
        self.los.release(full_heap);
        self.release_nonmoving_space(full_heap);
        self.base.release(tls, full_heap)
    }
758
    /// Schedule a bulk set (in the Prepare bucket) or bulk clear (in the Release bucket)
    /// of the unlog bits for the common-plan spaces. Only applies when the global log
    /// bit is kept in side metadata; `NoOp` schedules nothing.
    pub(crate) fn schedule_unlog_bits_op(&mut self, unlog_bits_op: UnlogBitsOperation) {
        if VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.is_on_side() {
            // # Safety: CommonPlan reference is always valid within this collection cycle.
            // The raw-pointer round-trip produces a `&'static`-like reference for the work
            // packets, which run and finish within the cycle.
            let common_plan = unsafe { &*(self as *const CommonPlan<VM>) };

            match unlog_bits_op {
                UnlogBitsOperation::NoOp => {}
                UnlogBitsOperation::BulkSet => {
                    self.base.scheduler.work_buckets[WorkBucketStage::Prepare]
                        .add(SetCommonPlanUnlogBits { common_plan });
                }
                UnlogBitsOperation::BulkClear => {
                    self.base.scheduler.work_buckets[WorkBucketStage::Release]
                        .add(ClearCommonPlanUnlogBits { common_plan });
                }
            }
        }
    }
777
    /// Bulk-clear the side log bits for the immortal space, the LOS, and the
    /// base-plan spaces.
    // NOTE(review): the nonmoving space is not covered here, while prepare/release
    // pass `UnlogBitsOperation::NoOp` to it — confirm whether nonmoving log bits
    // are intentionally left untouched.
    pub fn clear_side_log_bits(&self) {
        self.immortal.clear_side_log_bits();
        self.los.clear_side_log_bits();
        self.base.clear_side_log_bits();
    }
783
    /// Bulk-set the side log bits for the immortal space, the LOS, and the
    /// base-plan spaces.
    // NOTE(review): as with `clear_side_log_bits`, the nonmoving space is not
    // included — verify this is intended.
    pub fn set_side_log_bits(&self) {
        self.immortal.set_side_log_bits();
        self.los.set_side_log_bits();
        self.base.set_side_log_bits();
    }
789
    /// End-of-GC hook: notify the nonmoving space first, then the base plan.
    ///
    /// Arguments:
    /// * `tls`: the GC worker thread running the end-of-GC work.
    pub fn end_of_gc(&mut self, tls: VMWorkerThread) {
        self.end_of_gc_nonmoving_space();
        self.base.end_of_gc(tls);
    }
794
    /// Get a reference to the immortal space.
    pub fn get_immortal(&self) -> &ImmortalSpace<VM> {
        &self.immortal
    }
798
    /// Get a reference to the large object space (LOS).
    pub fn get_los(&self) -> &LargeObjectSpace<VM> {
        &self.los
    }
802
    /// Get a reference to the nonmoving space.
    pub fn get_nonmoving(&self) -> &NonMovingSpace<VM> {
        &self.nonmoving
    }
806
    /// Create the nonmoving space. The concrete policy behind the `NonMovingSpace`
    /// alias (defined elsewhere) is selected by the `immortal_as_nonmoving` /
    /// `marksweep_as_nonmoving` cargo features; only the default (Immix-based)
    /// variant needs the extra `ImmixSpaceArgs`.
    fn new_nonmoving_space(args: &mut CreateSpecificPlanArgs<VM>) -> NonMovingSpace<VM> {
        // The nonmoving space inherits the plan's generational setting for its
        // common space arguments.
        let space_args = args.get_common_space_args(args.constraints.generational, "nonmoving");
        cfg_if::cfg_if! {
            if #[cfg(any(feature = "immortal_as_nonmoving", feature = "marksweep_as_nonmoving"))] {
                NonMovingSpace::new(space_args)
            } else {
                // Immix requires extra args: this instance is not mixed-age and
                // must never move objects.
                NonMovingSpace::new(
                    space_args,
                    crate::policy::immix::ImmixSpaceArgs {
                        mixed_age: false,
                        never_move_objects: true,
                    },
                )
            }
        }
    }
824
    /// Prepare the nonmoving space for a collection. The `prepare` signature of
    /// the feature-selected policy differs, hence the cfg dispatch.
    fn prepare_nonmoving_space(&mut self, _full_heap: bool) {
        cfg_if::cfg_if! {
            if #[cfg(feature = "immortal_as_nonmoving")] {
                // Immortal space takes no arguments for prepare.
                self.nonmoving.prepare();
            } else if #[cfg(feature = "marksweep_as_nonmoving")] {
                self.nonmoving.prepare(_full_heap);
            } else {
                // Immix-based nonmoving space: no nursery state to pass, and the
                // unlog bits are deliberately left untouched here.
                self.nonmoving.prepare(_full_heap, None, UnlogBitsOperation::NoOp);
            }
        }
    }
836
837 fn release_nonmoving_space(&mut self, _full_heap: bool) {
838 cfg_if::cfg_if! {
839 if #[cfg(feature = "immortal_as_nonmoving")] {
840 self.nonmoving.release();
841 } else if #[cfg(feature = "marksweep_as_nonmoving")] {
842 self.nonmoving.prepare(_full_heap);
843 } else {
844 self.nonmoving.release(_full_heap, UnlogBitsOperation::NoOp);
845 }
846 }
847 }
848
    /// End-of-GC hook for the nonmoving space.
    fn end_of_gc_nonmoving_space(&mut self) {
        cfg_if::cfg_if! {
            if #[cfg(feature = "immortal_as_nonmoving")] {
                // Nothing we need to do for immortal space.
            } else if #[cfg(feature = "marksweep_as_nonmoving")] {
                self.nonmoving.end_of_gc();
            } else {
                // Textually identical to the mark-sweep branch (different concrete
                // type); branches are kept separate to mirror the other cfg
                // dispatch methods above.
                self.nonmoving.end_of_gc();
            }
        }
    }
860}
861
862use crate::policy::gc_work::TraceKind;
863use crate::vm::VMBinding;
864
/// A trait for anything that contains spaces.
/// Examples include concrete plans as well as `Gen`, `CommonPlan` and `BasePlan`.
/// All plans must implement this trait.
///
/// This trait provides methods for enumerating spaces in a struct, including spaces in nested
/// structs.
///
/// This trait can be implemented automatically by adding the `#[derive(HasSpaces)]` attribute to a
/// struct. It uses the derive macro defined in the `mmtk-macros` crate.
///
/// This trait visits spaces as `dyn`, so it should only be used when performance is not critical.
/// For performance critical methods that visit spaces in a plan, such as `trace_object`, it is
/// recommended to define a trait (such as `PlanTraceObject`) for concrete plans to implement, and
/// implement (by hand or automatically) the method without `dyn`.
pub trait HasSpaces {
    /// The type of the VM.
    type VM: VMBinding;

    /// Visit each space field immutably.
    ///
    /// If `Self` contains nested fields that contain more spaces, this method shall visit spaces
    /// in the outer struct first.
    fn for_each_space(&self, func: &mut dyn FnMut(&dyn Space<Self::VM>));

    /// Visit each space field mutably.
    ///
    /// If `Self` contains nested fields that contain more spaces, this method shall visit spaces
    /// in the outer struct first.
    fn for_each_space_mut(&mut self, func: &mut dyn FnMut(&mut dyn Space<Self::VM>));
}
895
/// A plan that uses `PlanProcessEdges` needs to provide an implementation for this trait.
/// Generally a plan does not need to manually implement this trait. Instead, we provide
/// a procedural macro that helps generate an implementation. Please check `macros/trace_object`.
///
/// A plan could also manually implement this trait. For the sake of performance, the implementation
/// of this trait should mark methods as `#[inline(always)]`.
pub trait PlanTraceObject<VM: VMBinding> {
    /// Trace objects in the plan. Generally one needs to figure out
    /// which space an object resides in, and invokes the corresponding policy
    /// trace object method.
    ///
    /// Arguments:
    /// * `queue`: the object queue of the current transitive closure.
    /// * `object`: the object to trace.
    /// * `worker`: the GC worker that is tracing this object.
    fn trace_object<Q: ObjectQueue, const KIND: TraceKind>(
        &self,
        queue: &mut Q,
        object: ObjectReference,
        worker: &mut GCWorker<VM>,
    ) -> ObjectReference;

    /// Post-scan objects in the plan. Each object is scanned by `VM::VMScanning::scan_object()`, and this function
    /// will be called after the `VM::VMScanning::scan_object()` as a hook to invoke possible policy post scan method.
    /// If a plan does not have any policy that needs post scan, this method can be implemented as empty.
    /// If a plan has a policy that has some policy specific behaviors for scanning (e.g. mark lines in Immix),
    /// this method should also invoke those policy specific methods for objects in that space.
    fn post_scan_object(&self, object: ObjectReference);

    /// Whether objects in this plan may move. If any of the spaces used by the plan may move objects, this should
    /// return true.
    fn may_move_objects<const KIND: TraceKind>() -> bool;
}
929
use enum_map::Enum;
/// Allocation semantics that MMTk provides.
/// Each allocation request requires a desired semantic for the object to allocate.
#[repr(i32)]
#[derive(Clone, Copy, Debug, Enum, PartialEq, Eq)]
pub enum AllocationSemantics {
    /// The default semantic. This means there is no specific requirement for the allocation.
    /// The actual semantic of the default will depend on the GC plan in use.
    Default = 0,
    /// Immortal objects will not be reclaimed. MMTk still traces immortal objects, but will not
    /// reclaim the objects even if they are dead.
    Immortal = 1,
    /// Large objects. It is usually desirable to allocate large objects specially. Large objects
    /// are allocated with page granularity and will not be moved.
    /// Each plan provides `max_non_los_default_alloc_bytes` (see [`crate::plan::PlanConstraints`]),
    /// which defines a threshold for objects that can be allocated with the default semantic. Any object that is larger than the
    /// threshold must be allocated with the `Los` semantic.
    /// This semantic may get removed and MMTk will transparently allocate into large object space for large objects.
    Los = 2,
    /// Code objects have execution permission.
    /// Note that this is a placeholder for now. Currently all the memory MMTk allocates has execution permission.
    Code = 3,
    /// Read-only objects cannot be mutated once they are initialized.
    /// Note that this is a placeholder for now. It does not provide read-only semantics.
    ReadOnly = 4,
    /// Los + Code.
    LargeCode = 5,
    /// Non-moving objects will not be moved by GC.
    NonMoving = 6,
}