use crate::global_state::GlobalState;
use crate::plan::PlanConstraints;
use crate::scheduler::GCWorkScheduler;
use crate::util::conversions::*;
use crate::util::metadata::side_metadata::{
    SideMetadataContext, SideMetadataSanity, SideMetadataSpec,
};
use crate::util::object_enum::ObjectEnumerator;
use crate::util::Address;
use crate::util::ObjectReference;

use crate::util::heap::layout::vm_layout::{vm_layout, LOG_BYTES_IN_CHUNK};
use crate::util::heap::{PageResource, VMRequest};
use crate::util::options::Options;
use crate::vm::{ActivePlan, Collection};

use crate::util::constants::LOG_BYTES_IN_MBYTE;
use crate::util::conversions;
use crate::util::opaque_pointer::*;

use crate::mmtk::SFT_MAP;
#[cfg(debug_assertions)]
use crate::policy::sft::EMPTY_SFT_NAME;
use crate::policy::sft::SFT;
use crate::util::alloc::allocator::AllocationOptions;
use crate::util::copy::*;
use crate::util::heap::gc_trigger::GCTrigger;
use crate::util::heap::layout::vm_layout::BYTES_IN_CHUNK;
use crate::util::heap::layout::Mmapper;
use crate::util::heap::layout::VMMap;
use crate::util::heap::space_descriptor::SpaceDescriptor;
use crate::util::heap::HeapMeta;
use crate::util::os::*;
use crate::vm::VMBinding;

use std::marker::PhantomData;
use std::sync::atomic::AtomicBool;
use std::sync::Arc;
use std::sync::Mutex;

use downcast_rs::Downcast;

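/// A `Space` manages a region of virtual memory (contiguous or chunk-based) under a
/// single policy. It combines a page resource that hands out pages with the space's
/// side metadata, its SFT entries, and the GC-trigger bookkeeping the policy needs.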
pub trait Space<VM: VMBinding>: 'static + SFT + Sync + Downcast {
    fn as_space(&self) -> &dyn Space<VM>;
    fn as_sft(&self) -> &(dyn SFT + Sync + 'static);
    fn get_page_resource(&self) -> &dyn PageResource<VM>;

    fn maybe_get_page_resource_mut(&mut self) -> Option<&mut dyn PageResource<VM>>;

    fn initialize_sft(&self, sft_map: &mut dyn crate::policy::sft_map::SFTMap);

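    /// Acquire `pages` pages from this space's page resource on behalf of `tls`.
    /// For mutator threads this first polls the GC trigger; if a GC was triggered and
    /// the allocation options do not allow overcommitting, or if the page resource
    /// cannot satisfy the request, this returns `Address::ZERO` after notifying the
    /// GC trigger (and possibly blocking the mutator for GC) via `not_acquiring`.
    ///
    /// A minimal sketch of how a caller might use the return value (illustrative only,
    /// not the actual allocator code; `pages_needed` and `alloc_options` are assumed to
    /// be in scope):
    ///
    /// ```ignore
    /// let start = space.acquire(tls, pages_needed, alloc_options);
    /// if start.is_zero() {
    ///     // No pages were handed out: a GC was triggered or the request failed.
    /// }
    /// ```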
    fn acquire(&self, tls: VMThread, pages: usize, alloc_options: AllocationOptions) -> Address {
        trace!(
            "Space.acquire, tls={:?}, alloc_options={:?}",
            tls,
            alloc_options
        );

        debug_assert!(
            !self.get_gc_trigger().will_oom_on_alloc(pages << crate::util::constants::LOG_BYTES_IN_PAGE),
            "The requested size is larger than the max heap size. Is will_go_oom_on_acquire used before acquiring memory?"
        );

        trace!("Reserving pages");
        let pr = self.get_page_resource();
        let pages_reserved = pr.reserve_pages(pages);
        trace!("Pages reserved");

        let should_poll = VM::VMActivePlan::is_mutator(tls);

        let gc_triggered = should_poll && {
            trace!("Polling ..");
            self.get_gc_trigger().poll(false, Some(self.as_space()))
        };

        let should_get_pages = !gc_triggered || alloc_options.allow_overcommit;

        if should_get_pages {
            if let Some(addr) = self.get_new_pages_and_initialize(tls, pages, pr, pages_reserved) {
                addr
            } else {
                self.not_acquiring(tls, alloc_options, pr, pages_reserved, true);
                Address::ZERO
            }
        } else {
            self.not_acquiring(tls, alloc_options, pr, pages_reserved, false);
            Address::ZERO
        }
    }

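    /// Ask the page resource for new pages and make them usable: mmap the memory and its
    /// side metadata, update the SFT map and space bookkeeping via `grow_space`, and zero
    /// the memory if this space requires zeroed pages. Returns `None` if the page
    /// resource could not provide the pages.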
    fn get_new_pages_and_initialize(
        &self,
        tls: VMThread,
        pages: usize,
        pr: &dyn PageResource<VM>,
        pages_reserved: usize,
    ) -> Option<Address> {
        let lock = self.common().acquire_lock.lock().unwrap();

        let Ok(res) = pr.get_new_pages(self.common().descriptor, pages_reserved, pages, tls) else {
            return None;
        };

        debug!(
            "Got new pages {} ({} pages) for {} in chunk {}, new_chunk? {}",
            res.start,
            res.pages,
            self.get_name(),
            conversions::chunk_align_down(res.start),
            res.new_chunk
        );
        let bytes = conversions::pages_to_bytes(res.pages);
        #[cfg(debug_assertions)]
        self.common()
            .metadata
            .assert_metadata_ranges_in_reserved_range(res.start, bytes, self.get_name());

        let mmap = || {
            if let Err(mmap_error) = self
                .common()
                .mmapper
                .ensure_mapped(
                    res.start,
                    res.pages,
                    self.common()
                        .options
                        .transparent_hugepages_as_huge_page_support(),
                    self.common().mmap_protection(),
                    &MmapAnnotation::Space {
                        name: self.get_name(),
                    },
                )
                .and(self.common().metadata.try_map_metadata_space(
                    res.start,
                    bytes,
                    self.get_name(),
                ))
            {
                OS::handle_mmap_error::<VM>(mmap_error, tls);
            }
        };
        let grow_space = || {
            self.grow_space(res.start, bytes, res.new_chunk);
        };

        // The order matters: if the SFT map itself lives in side metadata, the memory
        // (and its metadata) must be mapped before grow_space can update the SFT entries.
        // Otherwise we update the SFT first and map the memory afterwards.
        if SFT_MAP.get_side_metadata().is_some() {
            mmap();
            grow_space();
            drop(lock);
        } else {
            grow_space();
            drop(lock);
            mmap();
        }

        if self.common().zeroed {
            crate::util::memory::zero(res.start, bytes);
        }

        // Sanity checks: both ends of the newly acquired range must now be attributed
        // to this space in the SFT map and in the VM map.
        {
            debug_assert_eq!(SFT_MAP.get_checked(res.start).name(), self.get_name());
            debug_assert!(self.address_in_space(res.start));
            debug_assert_eq!(
                self.common().vm_map().get_descriptor_for_address(res.start),
                self.common().descriptor
            );

            let last_byte = res.start + bytes - 1;
            debug_assert_eq!(SFT_MAP.get_checked(last_byte).name(), self.get_name());
            debug_assert!(self.address_in_space(last_byte));
            debug_assert_eq!(
                self.common().vm_map().get_descriptor_for_address(last_byte),
                self.common().descriptor
            );
        }

        debug!("Space.acquire(), returned = {}", res.start);
        Some(res.start)
    }

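    /// Handle the case where `acquire` hands out no pages: release the reservation and,
    /// if the allocation happens at a safepoint, report the pending allocation to the GC
    /// trigger and block the mutator for GC. `attempted_allocation_and_failed` is true
    /// when the page resource was actually asked for pages and failed, rather than a GC
    /// being triggered before the attempt.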
    fn not_acquiring(
        &self,
        tls: VMThread,
        alloc_options: AllocationOptions,
        pr: &dyn PageResource<VM>,
        pages_reserved: usize,
        attempted_allocation_and_failed: bool,
    ) {
        assert!(
            VM::VMActivePlan::is_mutator(tls),
            "A non-mutator thread failed to get pages from page resource. \
             Copying GC plans should compute the copying headroom carefully to prevent this."
        );

        pr.clear_request(pages_reserved);

        if !alloc_options.at_safepoint {
            return;
        }

        debug!("Collection required");

        if !self.common().global_state.is_initialized() {
            panic!(
                "GC is not allowed here: collection is not initialized \
                 (did you call initialize_collection()?). \
                 Out of physical memory: {phy}",
                phy = attempted_allocation_and_failed
            );
        }

        if attempted_allocation_and_failed {
            let gc_performed = self.get_gc_trigger().poll(true, Some(self.as_space()));
            debug_assert!(gc_performed, "GC not performed when forced.");
        }

        let meta_pages_reserved = self.estimate_side_meta_pages(pages_reserved);
        let total_pages_reserved = pages_reserved + meta_pages_reserved;
        self.get_gc_trigger()
            .policy
            .on_pending_allocation(total_pages_reserved);

        VM::VMCollection::block_for_gc(VMMutatorThread(tls));
    }

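    /// Return true if `start` lies inside this space. Contiguous spaces use a simple
    /// range check; discontiguous spaces consult the VM map descriptor for the address.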
    fn address_in_space(&self, start: Address) -> bool {
        if !self.common().descriptor.is_contiguous() {
            self.common().vm_map().get_descriptor_for_address(start) == self.common().descriptor
        } else {
            start >= self.common().start && start < self.common().start + self.common().extent
        }
    }

    fn in_space(&self, object: ObjectReference) -> bool {
        self.address_in_space(object.to_raw_address())
    }

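    /// Record that this space now covers `[start, start + bytes)`. If the range includes
    /// a newly allocated chunk, the SFT entries for it are updated to point to this space.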
    fn grow_space(&self, start: Address, bytes: usize, new_chunk: bool) {
        trace!(
            "Grow space from {} for {} bytes (new chunk = {})",
            start,
            bytes,
            new_chunk
        );

        #[cfg(debug_assertions)]
        if !new_chunk {
            debug_assert!(
                SFT_MAP.get_checked(start).name() != EMPTY_SFT_NAME,
                "In grow_space(start = {}, bytes = {}, new_chunk = {}), we have empty SFT entries (chunk for {} = {})",
                start,
                bytes,
                new_chunk,
                start,
                SFT_MAP.get_checked(start).name()
            );
            debug_assert!(
                SFT_MAP.get_checked(start + bytes - 1).name() != EMPTY_SFT_NAME,
                "In grow_space(start = {}, bytes = {}, new_chunk = {}), we have empty SFT entries (chunk for {} = {})",
                start,
                bytes,
                new_chunk,
                start + bytes - 1,
                SFT_MAP.get_checked(start + bytes - 1).name()
            );
        }

        if new_chunk {
            unsafe { SFT_MAP.update(self.as_sft(), start, bytes) };
        }
    }

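    /// Estimate the number of side-metadata pages needed for `data_pages` pages of data,
    /// based on this space's side-metadata context.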
    fn estimate_side_meta_pages(&self, data_pages: usize) -> usize {
        self.common().metadata.calculate_reserved_pages(data_pages)
    }

    fn reserved_pages(&self) -> usize {
        let data_pages = self.get_page_resource().reserved_pages();
        let meta_pages = self.estimate_side_meta_pages(data_pages);
        data_pages + meta_pages
    }

    fn available_physical_pages(&self) -> usize {
        self.get_page_resource().get_available_physical_pages()
    }

    fn get_name(&self) -> &'static str {
        self.common().name
    }

    fn get_descriptor(&self) -> SpaceDescriptor {
        self.common().descriptor
    }

    fn common(&self) -> &CommonSpace<VM>;
    fn get_gc_trigger(&self) -> &GCTrigger<VM> {
        self.common().gc_trigger.as_ref()
    }

    fn release_multiple_pages(&mut self, start: Address);

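    /// Set the copy semantics used when this space is traced through the SFT. Only
    /// copying spaces need to override this; the default implementation panics.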
    fn set_copy_for_sft_trace(&mut self, _semantics: Option<CopySemantics>) {
        panic!("A copying space should override this method")
    }

    fn verify_side_metadata_sanity(&self, side_metadata_sanity_checker: &mut SideMetadataSanity) {
        side_metadata_sanity_checker
            .verify_metadata_context(std::any::type_name::<Self>(), &self.common().metadata)
    }

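    /// Enumerate the objects in this space, feeding each object (or the address ranges
    /// that may contain objects, depending on the policy) to `enumerator`.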
    fn enumerate_objects(&self, enumerator: &mut dyn ObjectEnumerator);

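    /// Set this space's `allocate_as_live` flag; `should_allocate_as_live` reports its
    /// current value.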
    fn set_allocate_as_live(&self, live: bool) {
        self.common()
            .allocate_as_live
            .store(live, std::sync::atomic::Ordering::SeqCst);
    }

    fn should_allocate_as_live(&self) -> bool {
        self.common()
            .allocate_as_live
            .load(std::sync::atomic::Ordering::Acquire)
    }

    fn clear_side_log_bits(&self);

    fn set_side_log_bits(&self);
}

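/// Write a one-line summary of a space for VM-map debug output: its name, its
/// immortal/movable flags, and the virtual address range(s) it currently occupies.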
#[allow(unused)]
pub(crate) fn print_vm_map<VM: VMBinding>(
    space: &dyn Space<VM>,
    out: &mut impl std::fmt::Write,
) -> Result<(), std::fmt::Error> {
    let common = space.common();
    write!(out, "{} ", common.name)?;
    if common.immortal {
        write!(out, "I")?;
    } else {
        write!(out, " ")?;
    }
    if common.movable {
        write!(out, " ")?;
    } else {
        write!(out, "N")?;
    }
    write!(out, " ")?;
    if common.contiguous {
        write!(
            out,
            "{}->{}",
            common.start,
            common.start + common.extent - 1
        )?;
        match common.vmrequest {
            VMRequest::Extent { extent, .. } => {
                write!(out, " E {}", extent)?;
            }
            VMRequest::Fraction { frac, .. } => {
                write!(out, " F {}", frac)?;
            }
            _ => {}
        }
    } else {
        let mut a = space
            .get_page_resource()
            .common()
            .get_head_discontiguous_region();
        while !a.is_zero() {
            write!(
                out,
                "{}->{}",
                a,
                a + space.common().vm_map().get_contiguous_region_size(a) - 1
            )?;
            a = space.common().vm_map().get_next_contiguous_region(a);
            if !a.is_zero() {
                write!(out, " ")?;
            }
        }
    }
    writeln!(out)?;

    Ok(())
}

impl_downcast!(Space<VM> where VM: VMBinding);

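/// State shared by all space implementations: identity (name, descriptor), layout
/// (start, extent, contiguity), policy properties (immortal, movable, zeroed,
/// executable), the side-metadata context, log-bit configuration, and handles to
/// global MMTk state (VM map, mmapper, GC trigger, options).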
pub struct CommonSpace<VM: VMBinding> {
    pub name: &'static str,
    pub descriptor: SpaceDescriptor,
    pub vmrequest: VMRequest,

    pub copy: Option<CopySemantics>,

    pub immortal: bool,
    pub movable: bool,
    pub contiguous: bool,
    pub zeroed: bool,

    pub permission_exec: bool,

    pub start: Address,
    pub extent: usize,

    pub vm_map: &'static dyn VMMap,
    pub mmapper: &'static dyn Mmapper,

    pub(crate) metadata: SideMetadataContext,

    pub needs_log_bit: bool,
    pub unlog_allocated_object: bool,
    pub unlog_traced_object: bool,

    pub acquire_lock: Mutex<()>,

    pub gc_trigger: Arc<GCTrigger<VM>>,
    pub global_state: Arc<GlobalState>,
    pub options: Arc<Options>,

    pub allocate_as_live: AtomicBool,

    p: PhantomData<VM>,
}

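/// Arguments a policy passes to `CommonSpace::new`: the plan-level arguments plus the
/// policy's own properties (movability, immortality) and its local side-metadata specs.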
pub struct PolicyCreateSpaceArgs<'a, VM: VMBinding> {
    pub plan_args: PlanCreateSpaceArgs<'a, VM>,
    pub movable: bool,
    pub immortal: bool,
    pub local_side_metadata_specs: Vec<SideMetadataSpec>,
}

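/// Arguments a plan supplies when creating a space, before the policy adds its own
/// properties via `into_policy_args`.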
pub struct PlanCreateSpaceArgs<'a, VM: VMBinding> {
    pub name: &'static str,
    pub zeroed: bool,
    pub permission_exec: bool,
    pub unlog_allocated_object: bool,
    pub unlog_traced_object: bool,
    pub vmrequest: VMRequest,
    pub global_side_metadata_specs: Vec<SideMetadataSpec>,
    pub vm_map: &'static dyn VMMap,
    pub mmapper: &'static dyn Mmapper,
    pub heap: &'a mut HeapMeta,
    pub constraints: &'a PlanConstraints,
    pub gc_trigger: Arc<GCTrigger<VM>>,
    pub scheduler: Arc<GCWorkScheduler<VM>>,
    pub options: Arc<Options>,
    pub global_state: Arc<GlobalState>,
}

impl<'a, VM: VMBinding> PlanCreateSpaceArgs<'a, VM> {
    pub fn into_policy_args(
        self,
        movable: bool,
        immortal: bool,
        policy_metadata_specs: Vec<SideMetadataSpec>,
    ) -> PolicyCreateSpaceArgs<'a, VM> {
        PolicyCreateSpaceArgs {
            movable,
            immortal,
            local_side_metadata_specs: policy_metadata_specs,
            plan_args: self,
        }
    }
}

impl<VM: VMBinding> CommonSpace<VM> {
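    /// Create a `CommonSpace` from the policy and plan arguments. Contiguous spaces
    /// reserve their address range here (or use the fixed range requested), create a
    /// descriptor for it, and record it in the VM map; discontiguous spaces get an
    /// anonymous descriptor and obtain chunks later through their page resource.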
    pub fn new(args: PolicyCreateSpaceArgs<VM>) -> Self {
        let mut rtn = CommonSpace {
            name: args.plan_args.name,
            descriptor: SpaceDescriptor::UNINITIALIZED,
            vmrequest: args.plan_args.vmrequest,
            copy: None,
            immortal: args.immortal,
            movable: args.movable,
            contiguous: true,
            permission_exec: args.plan_args.permission_exec,
            zeroed: args.plan_args.zeroed,
            start: unsafe { Address::zero() },
            extent: 0,
            vm_map: args.plan_args.vm_map,
            mmapper: args.plan_args.mmapper,
            needs_log_bit: args.plan_args.constraints.needs_log_bit,
            unlog_allocated_object: args.plan_args.unlog_allocated_object,
            unlog_traced_object: args.plan_args.unlog_traced_object,
            gc_trigger: args.plan_args.gc_trigger,
            metadata: SideMetadataContext {
                global: args.plan_args.global_side_metadata_specs,
                local: args.local_side_metadata_specs,
            },
            acquire_lock: Mutex::new(()),
            global_state: args.plan_args.global_state,
            options: args.plan_args.options.clone(),
            allocate_as_live: AtomicBool::new(false),
            p: PhantomData,
        };

        let vmrequest = args.plan_args.vmrequest;
        if vmrequest.is_discontiguous() {
            rtn.contiguous = false;
            rtn.descriptor = SpaceDescriptor::create_descriptor();
            return rtn;
        }

        let (extent, top) = match vmrequest {
            VMRequest::Fraction { frac, top: _top } => (get_frac_available(frac), _top),
            VMRequest::Extent {
                extent: _extent,
                top: _top,
            } => (_extent, _top),
            VMRequest::Fixed {
                extent: _extent, ..
            } => (_extent, false),
            _ => unreachable!(),
        };

        assert!(
            extent == raw_align_up(extent, BYTES_IN_CHUNK),
            "{} requested non-aligned extent: {} bytes",
            rtn.name,
            extent
        );

        let start = if let VMRequest::Fixed { start: _start, .. } = vmrequest {
            _start
        } else {
            args.plan_args.heap.reserve(extent, top)
        };
        assert!(
            start == chunk_align_up(start),
            "{} starting on non-aligned boundary: {}",
            rtn.name,
            start
        );

        rtn.contiguous = true;
        rtn.start = start;
        rtn.extent = extent;
        rtn.descriptor = SpaceDescriptor::create_descriptor_from_heap_range(start, start + extent);
        {
            use crate::util::heap::layout;
            let overlap =
                Address::range_intersection(&(start..start + extent), &layout::available_range());
            if !overlap.is_empty() {
                args.plan_args.vm_map.insert(
                    overlap.start,
                    overlap.end - overlap.start,
                    rtn.descriptor,
                );
            }
        }

        debug!(
            "Created space {} [{}, {}) for {} bytes",
            rtn.name,
            start,
            start + extent,
            extent
        );

        rtn
    }

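    /// Initialize the SFT entries for this space. Only contiguous spaces are eagerly
    /// initialized here; discontiguous spaces update the SFT as they acquire new chunks
    /// (see `Space::grow_space`).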
    pub fn initialize_sft(
        &self,
        sft: &(dyn SFT + Sync + 'static),
        sft_map: &mut dyn crate::policy::sft_map::SFTMap,
    ) {
        if self.contiguous {
            unsafe { sft_map.eager_initialize(sft, self.start, self.extent) };
        }
    }

    pub fn vm_map(&self) -> &'static dyn VMMap {
        self.vm_map
    }

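    /// Memory protection used when mapping this space: read+write, plus execute if the
    /// space requires it or the `exec_permission_on_all_spaces` feature is enabled.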
    pub fn mmap_protection(&self) -> MmapProtection {
        if self.permission_exec || cfg!(feature = "exec_permission_on_all_spaces") {
            MmapProtection::ReadWriteExec
        } else {
            MmapProtection::ReadWrite
        }
    }

    pub(crate) fn debug_print_object_global_info(&self, object: ObjectReference) {
        #[cfg(feature = "vo_bit")]
        println!(
            "vo bit = {}",
            crate::util::metadata::vo_bit::is_vo_bit_set(object)
        );
        if self.needs_log_bit {
            use crate::vm::object_model::ObjectModel;
            use std::sync::atomic::Ordering;
            println!(
                "log bit = {}",
                VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.is_unlogged::<VM>(object, Ordering::Relaxed),
            );
        }
        println!("is live = {}", object.is_live());
    }
}

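/// Compute `frac` of the available virtual address range in bytes, rounded down to a
/// whole megabyte and then up to a chunk boundary.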
fn get_frac_available(frac: f32) -> usize {
    trace!("AVAILABLE_START={}", vm_layout().available_start());
    trace!("AVAILABLE_END={}", vm_layout().available_end());
    let bytes = (frac * vm_layout().available_bytes() as f32) as usize;
    trace!("bytes={}*{}={}", frac, vm_layout().available_bytes(), bytes);
    let mb = bytes >> LOG_BYTES_IN_MBYTE;
    let rtn = mb << LOG_BYTES_IN_MBYTE;
    trace!("rtn={}", rtn);
    let aligned_rtn = raw_align_up(rtn, BYTES_IN_CHUNK);
    trace!("aligned_rtn={}", aligned_rtn);
    aligned_rtn
}

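/// Number of chunks needed to hold `pages` pages, rounding the byte size up to a whole
/// chunk.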
pub fn required_chunks(pages: usize) -> usize {
    let extent = raw_align_up(pages_to_bytes(pages), BYTES_IN_CHUNK);
    extent >> LOG_BYTES_IN_CHUNK
}