use crate::global_state::GlobalState;
use crate::plan::PlanConstraints;
use crate::scheduler::GCWorkScheduler;
use crate::util::conversions::*;
use crate::util::metadata::side_metadata::{
    SideMetadataContext, SideMetadataSanity, SideMetadataSpec,
};
use crate::util::object_enum::ObjectEnumerator;
use crate::util::Address;
use crate::util::ObjectReference;

use crate::util::heap::layout::vm_layout::{vm_layout, LOG_BYTES_IN_CHUNK};
use crate::util::heap::{PageResource, VMRequest};
use crate::util::options::Options;
use crate::vm::{ActivePlan, Collection};

use crate::util::constants::{LOG_BYTES_IN_MBYTE, LOG_BYTES_IN_PAGE};
use crate::util::conversions;
use crate::util::opaque_pointer::*;

use crate::mmtk::SFT_MAP;
#[cfg(debug_assertions)]
use crate::policy::sft::EMPTY_SFT_NAME;
use crate::policy::sft::SFT;
use crate::util::alloc::allocator::AllocationOptions;
use crate::util::copy::*;
use crate::util::heap::gc_trigger::GCTrigger;
use crate::util::heap::layout::vm_layout::BYTES_IN_CHUNK;
use crate::util::heap::layout::Mmapper;
use crate::util::heap::layout::VMMap;
use crate::util::heap::space_descriptor::SpaceDescriptor;
use crate::util::heap::HeapMeta;
use crate::util::memory::{self, HugePageSupport, MmapProtection, MmapStrategy};
use crate::vm::VMBinding;

use std::marker::PhantomData;
use std::sync::atomic::AtomicBool;
use std::sync::Arc;
use std::sync::Mutex;

use downcast_rs::Downcast;

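/// A space is a region of (virtual) memory managed by a single policy. This trait captures the
/// behaviour shared by all spaces: acquiring pages from a page resource, mapping memory and side
/// metadata, growing the space, and keeping the SFT map and space descriptor in sync.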
pub trait Space<VM: VMBinding>: 'static + SFT + Sync + Downcast {
    fn as_space(&self) -> &dyn Space<VM>;
    fn as_sft(&self) -> &(dyn SFT + Sync + 'static);
    fn get_page_resource(&self) -> &dyn PageResource<VM>;

    fn maybe_get_page_resource_mut(&mut self) -> Option<&mut dyn PageResource<VM>>;

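    /// Initialize the entries in the SFT map for this space (see [`CommonSpace::initialize_sft`]
    /// for the common eager initialization of contiguous spaces).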
    fn initialize_sft(&self, sft_map: &mut dyn crate::policy::sft_map::SFTMap);

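    /// Return `true` if an allocation request of `size` bytes can never be satisfied because it
    /// exceeds the maximum heap size by itself. This lets callers detect an obvious
    /// out-of-memory request before reserving pages.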
    fn will_oom_on_acquire(&self, size: usize) -> bool {
        let max_pages = self.get_gc_trigger().policy.get_max_heap_size_in_pages();
        let requested_pages = size >> LOG_BYTES_IN_PAGE;
        requested_pages > max_pages
    }

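    /// Handle an obvious out-of-memory request: if [`Space::will_oom_on_acquire`] returns `true`
    /// for `size`, report an out-of-memory error to the VM (unless the allocation options
    /// suppress the OOM callback) and return `true` so the caller can bail out of the
    /// allocation.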
    fn handle_obvious_oom_request(
        &self,
        tls: VMThread,
        size: usize,
        alloc_options: AllocationOptions,
    ) -> bool {
        if self.will_oom_on_acquire(size) {
            if alloc_options.allow_oom_call {
                VM::VMCollection::out_of_memory(
                    tls,
                    crate::util::alloc::AllocationError::HeapOutOfMemory,
                );
            }
            return true;
        }
        false
    }

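    /// Acquire `pages` pages from this space. This reserves the pages in the page resource,
    /// polls the GC trigger if the caller is a mutator, and then either maps and returns the new
    /// pages, or gives up (possibly blocking the mutator for a GC) and returns `Address::ZERO`.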
    fn acquire(&self, tls: VMThread, pages: usize, alloc_options: AllocationOptions) -> Address {
        trace!(
            "Space.acquire, tls={:?}, alloc_options={:?}",
            tls,
            alloc_options
        );

        debug_assert!(
            !self.will_oom_on_acquire(pages << LOG_BYTES_IN_PAGE),
            "The requested pages exceed the max heap size. Is will_oom_on_acquire used before acquiring memory?"
        );

        trace!("Reserving pages");
        let pr = self.get_page_resource();
        let pages_reserved = pr.reserve_pages(pages);
        trace!("Pages reserved");

        // Only poll the GC trigger for mutator threads.
        let should_poll = VM::VMActivePlan::is_mutator(tls);

        let gc_triggered = should_poll && {
            trace!("Polling ..");
            self.get_gc_trigger().poll(false, Some(self.as_space()))
        };

        // Get new pages unless a GC was triggered, or if the allocation is allowed to overcommit.
        let should_get_pages = !gc_triggered || alloc_options.allow_overcommit;

        if should_get_pages {
            if let Some(addr) = self.get_new_pages_and_initialize(tls, pages, pr, pages_reserved) {
                addr
            } else {
                self.not_acquiring(tls, alloc_options, pr, pages_reserved, true);
                Address::ZERO
            }
        } else {
            self.not_acquiring(tls, alloc_options, pr, pages_reserved, false);
            Address::ZERO
        }
    }

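    /// Ask the page resource for new pages and initialize them: mmap the memory and the side
    /// metadata, update the SFT map for any new chunk, and zero the memory if this space
    /// requires zeroed pages. The space's `acquire_lock` is held while the page resource and SFT
    /// map are updated. Returns `None` if the page resource cannot satisfy the request.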
    fn get_new_pages_and_initialize(
        &self,
        tls: VMThread,
        pages: usize,
        pr: &dyn PageResource<VM>,
        pages_reserved: usize,
    ) -> Option<Address> {
        let lock = self.common().acquire_lock.lock().unwrap();

        let Ok(res) = pr.get_new_pages(self.common().descriptor, pages_reserved, pages, tls) else {
            return None;
        };

        debug!(
            "Got new pages {} ({} pages) for {} in chunk {}, new_chunk? {}",
            res.start,
            res.pages,
            self.get_name(),
            conversions::chunk_align_down(res.start),
            res.new_chunk
        );
        let bytes = conversions::pages_to_bytes(res.pages);

        let mmap = || {
            if let Err(mmap_error) = self
                .common()
                .mmapper
                .ensure_mapped(
                    res.start,
                    res.pages,
                    self.common().mmap_strategy(),
                    &memory::MmapAnnotation::Space {
                        name: self.get_name(),
                    },
                )
                .and(self.common().metadata.try_map_metadata_space(
                    res.start,
                    bytes,
                    self.get_name(),
                ))
            {
                memory::handle_mmap_error::<VM>(mmap_error, tls, res.start, bytes);
            }
        };
        let grow_space = || {
            self.grow_space(res.start, bytes, res.new_chunk);
        };

        // If the SFT map is backed by side metadata, that metadata must be mapped before the SFT
        // entries are updated; otherwise we can update the SFT map and release the lock before
        // mmapping.
        if SFT_MAP.get_side_metadata().is_some() {
            mmap();
            grow_space();
            drop(lock);
        } else {
            grow_space();
            drop(lock);
            mmap();
        }

        if self.common().zeroed {
            memory::zero(res.start, bytes);
        }

        // Sanity-check that the SFT map and the VM map agree with this space at both ends of the
        // newly acquired region.
        {
            debug_assert_eq!(SFT_MAP.get_checked(res.start).name(), self.get_name());
            debug_assert!(self.address_in_space(res.start));
            debug_assert_eq!(
                self.common().vm_map().get_descriptor_for_address(res.start),
                self.common().descriptor
            );

            let last_byte = res.start + bytes - 1;
            debug_assert_eq!(SFT_MAP.get_checked(last_byte).name(), self.get_name());
            debug_assert!(self.address_in_space(last_byte));
            debug_assert_eq!(
                self.common().vm_map().get_descriptor_for_address(last_byte),
                self.common().descriptor
            );
        }

        debug!("Space.acquire(), returned = {}", res.start);
        Some(res.start)
    }

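    /// Called when `acquire()` does not return new pages, either because polling triggered a GC
    /// or because the page resource failed to provide pages. This clears the reservation and, if
    /// the allocation request is allowed to block at a safepoint, reports the pending allocation
    /// to the GC trigger and blocks the mutator for GC.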
    fn not_acquiring(
        &self,
        tls: VMThread,
        alloc_options: AllocationOptions,
        pr: &dyn PageResource<VM>,
        pages_reserved: usize,
        attempted_allocation_and_failed: bool,
    ) {
        assert!(
            VM::VMActivePlan::is_mutator(tls),
            "A non-mutator thread failed to get pages from page resource. \
            Copying GC plans should compute the copying headroom carefully to prevent this."
        );

        pr.clear_request(pages_reserved);

        if !alloc_options.at_safepoint {
            return;
        }

        debug!("Collection required");

        if !self.common().global_state.is_initialized() {
            panic!(
                "GC is not allowed here: collection is not initialized \
                (did you call initialize_collection()?). \
                Out of physical memory: {phy}",
                phy = attempted_allocation_and_failed
            );
        }

        if attempted_allocation_and_failed {
            let gc_performed = self.get_gc_trigger().poll(true, Some(self.as_space()));
            debug_assert!(gc_performed, "GC not performed when forced.");
        }

        // Account for the side metadata pages that correspond to the pending data pages, so the
        // GC trigger sees the full reservation.
        let meta_pages_reserved = self.estimate_side_meta_pages(pages_reserved);
        let total_pages_reserved = pages_reserved + meta_pages_reserved;
        self.get_gc_trigger()
            .policy
            .on_pending_allocation(total_pages_reserved);

        VM::VMCollection::block_for_gc(VMMutatorThread(tls));
    }

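    /// Return `true` if the given address is within this space.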
    fn address_in_space(&self, start: Address) -> bool {
        if !self.common().descriptor.is_contiguous() {
            self.common().vm_map().get_descriptor_for_address(start) == self.common().descriptor
        } else {
            start >= self.common().start && start < self.common().start + self.common().extent
        }
    }

    fn in_space(&self, object: ObjectReference) -> bool {
        self.address_in_space(object.to_raw_address())
    }

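    /// Record that this space has grown by `bytes` starting at `start`. If the growth added a
    /// new chunk, the SFT map entries covering the new memory are updated to point to this
    /// space.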
    fn grow_space(&self, start: Address, bytes: usize, new_chunk: bool) {
        trace!(
            "Grow space from {} for {} bytes (new chunk = {})",
            start,
            bytes,
            new_chunk
        );

        #[cfg(debug_assertions)]
        if !new_chunk {
            debug_assert!(
                SFT_MAP.get_checked(start).name() != EMPTY_SFT_NAME,
                "In grow_space(start = {}, bytes = {}, new_chunk = {}), we have empty SFT entries (chunk for {} = {})",
                start,
                bytes,
                new_chunk,
                start,
                SFT_MAP.get_checked(start).name()
            );
            debug_assert!(
                SFT_MAP.get_checked(start + bytes - 1).name() != EMPTY_SFT_NAME,
                "In grow_space(start = {}, bytes = {}, new_chunk = {}), we have empty SFT entries (chunk for {} = {})",
                start,
                bytes,
                new_chunk,
                start + bytes - 1,
                SFT_MAP.get_checked(start + bytes - 1).name()
            );
        }

        if new_chunk {
            unsafe { SFT_MAP.update(self.as_sft(), start, bytes) };
        }
    }

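    /// Estimate the number of pages occupied by this space's side metadata for the given number
    /// of data pages.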
    fn estimate_side_meta_pages(&self, data_pages: usize) -> usize {
        self.common().metadata.calculate_reserved_pages(data_pages)
    }

    fn reserved_pages(&self) -> usize {
        let data_pages = self.get_page_resource().reserved_pages();
        let meta_pages = self.estimate_side_meta_pages(data_pages);
        data_pages + meta_pages
    }

    fn available_physical_pages(&self) -> usize {
        self.get_page_resource().get_available_physical_pages()
    }

    fn get_name(&self) -> &'static str {
        self.common().name
    }

    fn get_descriptor(&self) -> SpaceDescriptor {
        self.common().descriptor
    }

    fn common(&self) -> &CommonSpace<VM>;
    fn get_gc_trigger(&self) -> &GCTrigger<VM> {
        self.common().gc_trigger.as_ref()
    }

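    /// Release the pages starting at `start` back to the page resource. The exact semantics are
    /// policy-specific.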
    fn release_multiple_pages(&mut self, start: Address);

    fn set_copy_for_sft_trace(&mut self, _semantics: Option<CopySemantics>) {
        panic!("A copying space should override this method")
    }

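    /// Verify that this space's side metadata context is consistent with the contexts of other
    /// spaces (used by debugging and sanity checks).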
    fn verify_side_metadata_sanity(&self, side_metadata_sanity_checker: &mut SideMetadataSanity) {
        side_metadata_sanity_checker
            .verify_metadata_context(std::any::type_name::<Self>(), &self.common().metadata)
    }

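    /// Enumerate the objects in this space, feeding them (or the address ranges that may contain
    /// them) to the given enumerator.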
    fn enumerate_objects(&self, enumerator: &mut dyn ObjectEnumerator);

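    /// Set whether objects allocated in this space should be considered live on allocation.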
    fn set_allocate_as_live(&self, live: bool) {
        self.common()
            .allocate_as_live
            .store(live, std::sync::atomic::Ordering::SeqCst);
    }

    fn should_allocate_as_live(&self) -> bool {
        self.common()
            .allocate_as_live
            .load(std::sync::atomic::Ordering::Acquire)
    }

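    /// Clear the side log bits for this space. Only meaningful for plans that use the log bit.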
    fn clear_side_log_bits(&self);

    /// Set the side log bits for this space. Only meaningful for plans that use the log bit.
    fn set_side_log_bits(&self);
}

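/// Print a one-line summary of a space for a VM map dump: the space name, an 'I' flag for
/// immortal spaces, an 'N' flag for non-moving spaces, and the address range(s) the space
/// occupies.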
#[allow(unused)]
pub(crate) fn print_vm_map<VM: VMBinding>(
    space: &dyn Space<VM>,
    out: &mut impl std::fmt::Write,
) -> Result<(), std::fmt::Error> {
    let common = space.common();
    write!(out, "{} ", common.name)?;
    if common.immortal {
        write!(out, "I")?;
    } else {
        write!(out, " ")?;
    }
    if common.movable {
        write!(out, " ")?;
    } else {
        write!(out, "N")?;
    }
    write!(out, " ")?;
    if common.contiguous {
        write!(
            out,
            "{}->{}",
            common.start,
            common.start + common.extent - 1
        )?;
        match common.vmrequest {
            VMRequest::Extent { extent, .. } => {
                write!(out, " E {}", extent)?;
            }
            VMRequest::Fraction { frac, .. } => {
                write!(out, " F {}", frac)?;
            }
            _ => {}
        }
    } else {
        let mut a = space
            .get_page_resource()
            .common()
            .get_head_discontiguous_region();
        while !a.is_zero() {
            write!(
                out,
                "{}->{}",
                a,
                a + space.common().vm_map().get_contiguous_region_size(a) - 1
            )?;
            a = space.common().vm_map().get_next_contiguous_region(a);
            if !a.is_zero() {
                write!(out, " ")?;
            }
        }
    }
    writeln!(out)?;

    Ok(())
}

impl_downcast!(Space<VM> where VM: VMBinding);

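/// Fields and functionality common to spaces. Concrete space policies typically embed a
/// `CommonSpace` and expose it through [`Space::common`].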
pub struct CommonSpace<VM: VMBinding> {
    pub name: &'static str,
    pub descriptor: SpaceDescriptor,
    pub vmrequest: VMRequest,

    /// The copy semantics for this space, if it is used as a copy destination.
    pub copy: Option<CopySemantics>,

    pub immortal: bool,
    pub movable: bool,
    pub contiguous: bool,
    pub zeroed: bool,

    /// Whether the memory of this space should be mapped with the executable permission.
    pub permission_exec: bool,

    pub start: Address,
    pub extent: usize,

    pub vm_map: &'static dyn VMMap,
    pub mmapper: &'static dyn Mmapper,

    pub(crate) metadata: SideMetadataContext,

    /// Whether the plan uses the log bit (from the plan constraints).
    pub needs_log_bit: bool,
    pub unlog_allocated_object: bool,
    pub unlog_traced_object: bool,

    /// A lock used to serialize the steps of acquiring new pages (page resource, SFT map and
    /// metadata updates).
    pub acquire_lock: Mutex<()>,

    pub gc_trigger: Arc<GCTrigger<VM>>,
    pub global_state: Arc<GlobalState>,
    pub options: Arc<Options>,

    pub allocate_as_live: AtomicBool,

    p: PhantomData<VM>,
}

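/// Arguments for creating a space, as seen by a specific policy. This wraps the plan-level
/// arguments with policy-specific settings such as movability and policy-local side metadata.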
pub struct PolicyCreateSpaceArgs<'a, VM: VMBinding> {
    pub plan_args: PlanCreateSpaceArgs<'a, VM>,
    pub movable: bool,
    pub immortal: bool,
    pub local_side_metadata_specs: Vec<SideMetadataSpec>,
}

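/// Arguments for creating a space, as supplied by the plan. A policy converts this into
/// [`PolicyCreateSpaceArgs`] via [`PlanCreateSpaceArgs::into_policy_args`].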
pub struct PlanCreateSpaceArgs<'a, VM: VMBinding> {
    pub name: &'static str,
    pub zeroed: bool,
    pub permission_exec: bool,
    pub unlog_allocated_object: bool,
    pub unlog_traced_object: bool,
    pub vmrequest: VMRequest,
    pub global_side_metadata_specs: Vec<SideMetadataSpec>,
    pub vm_map: &'static dyn VMMap,
    pub mmapper: &'static dyn Mmapper,
    pub heap: &'a mut HeapMeta,
    pub constraints: &'a PlanConstraints,
    pub gc_trigger: Arc<GCTrigger<VM>>,
    pub scheduler: Arc<GCWorkScheduler<VM>>,
    pub options: Arc<Options>,
    pub global_state: Arc<GlobalState>,
}

impl<'a, VM: VMBinding> PlanCreateSpaceArgs<'a, VM> {
    /// Turn the plan-level arguments into policy-level arguments by attaching the
    /// policy-specific settings.
    pub fn into_policy_args(
        self,
        movable: bool,
        immortal: bool,
        policy_metadata_specs: Vec<SideMetadataSpec>,
    ) -> PolicyCreateSpaceArgs<'a, VM> {
        PolicyCreateSpaceArgs {
            movable,
            immortal,
            local_side_metadata_specs: policy_metadata_specs,
            plan_args: self,
        }
    }
}

impl<VM: VMBinding> CommonSpace<VM> {
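    /// Create a new `CommonSpace` from the policy-level arguments. For contiguous requests this
    /// reserves an address range from the heap layout, creates the space descriptor, records the
    /// range in the VM map, and maps the address range for side metadata.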
    pub fn new(args: PolicyCreateSpaceArgs<VM>) -> Self {
        let mut rtn = CommonSpace {
            name: args.plan_args.name,
            descriptor: SpaceDescriptor::UNINITIALIZED,
            vmrequest: args.plan_args.vmrequest,
            copy: None,
            immortal: args.immortal,
            movable: args.movable,
            contiguous: true,
            permission_exec: args.plan_args.permission_exec,
            zeroed: args.plan_args.zeroed,
            start: unsafe { Address::zero() },
            extent: 0,
            vm_map: args.plan_args.vm_map,
            mmapper: args.plan_args.mmapper,
            needs_log_bit: args.plan_args.constraints.needs_log_bit,
            unlog_allocated_object: args.plan_args.unlog_allocated_object,
            unlog_traced_object: args.plan_args.unlog_traced_object,
            gc_trigger: args.plan_args.gc_trigger,
            metadata: SideMetadataContext {
                global: args.plan_args.global_side_metadata_specs,
                local: args.local_side_metadata_specs,
            },
            acquire_lock: Mutex::new(()),
            global_state: args.plan_args.global_state,
            options: args.plan_args.options.clone(),
            allocate_as_live: AtomicBool::new(false),
            p: PhantomData,
        };

        let vmrequest = args.plan_args.vmrequest;
        if vmrequest.is_discontiguous() {
            rtn.contiguous = false;
            // A discontiguous space gets its address ranges lazily from the VM map, so there is
            // nothing more to set up here.
            rtn.descriptor = SpaceDescriptor::create_descriptor();
            return rtn;
        }

        let (extent, top) = match vmrequest {
            VMRequest::Fraction { frac, top: _top } => (get_frac_available(frac), _top),
            VMRequest::Extent {
                extent: _extent,
                top: _top,
            } => (_extent, _top),
            VMRequest::Fixed {
                extent: _extent, ..
            } => (_extent, false),
            _ => unreachable!(),
        };

        assert!(
            extent == raw_align_up(extent, BYTES_IN_CHUNK),
            "{} requested non-aligned extent: {} bytes",
            rtn.name,
            extent
        );

        let start = if let VMRequest::Fixed { start: _start, .. } = vmrequest {
            _start
        } else {
            args.plan_args.heap.reserve(extent, top)
        };
        assert!(
            start == chunk_align_up(start),
            "{} starting on non-aligned boundary: {}",
            rtn.name,
            start
        );

        rtn.contiguous = true;
        rtn.start = start;
        rtn.extent = extent;
        rtn.descriptor = SpaceDescriptor::create_descriptor_from_heap_range(start, start + extent);
        {
            // Record the contiguous range in the VM map, but only the part that overlaps with
            // the available heap address range.
            use crate::util::heap::layout;
            let overlap =
                Address::range_intersection(&(start..start + extent), &layout::available_range());
            if !overlap.is_empty() {
                args.plan_args.vm_map.insert(
                    overlap.start,
                    overlap.end - overlap.start,
                    rtn.descriptor,
                );
            }
        }

        rtn.metadata
            .try_map_metadata_address_range(rtn.start, rtn.extent, rtn.name)
            .unwrap_or_else(|e| {
                panic!("failed to mmap meta memory: {e}");
            });

        debug!(
            "Created space {} [{}, {}) for {} bytes",
            rtn.name,
            start,
            start + extent,
            extent
        );

        rtn
    }

    /// Initialize the SFT map entries for this space. For a contiguous space, the entries for
    /// the whole address range are initialized eagerly, since `grow_space()` only updates the
    /// SFT map when a new chunk is added.
    pub fn initialize_sft(
        &self,
        sft: &(dyn SFT + Sync + 'static),
        sft_map: &mut dyn crate::policy::sft_map::SFTMap,
    ) {
        if self.contiguous {
            unsafe { sft_map.eager_initialize(sft, self.start, self.extent) };
        }
    }

    pub fn vm_map(&self) -> &'static dyn VMMap {
        self.vm_map
    }

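    /// The mmap strategy for this space: transparent huge pages if enabled in the options, and
    /// an executable mapping if the space requires it (or if the
    /// `exec_permission_on_all_spaces` feature is enabled).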
    pub fn mmap_strategy(&self) -> MmapStrategy {
        MmapStrategy {
            huge_page: if *self.options.transparent_hugepages {
                HugePageSupport::TransparentHugePages
            } else {
                HugePageSupport::No
            },
            prot: if self.permission_exec || cfg!(feature = "exec_permission_on_all_spaces") {
                MmapProtection::ReadWriteExec
            } else {
                MmapProtection::ReadWrite
            },
        }
    }

    pub(crate) fn debug_print_object_global_info(&self, object: ObjectReference) {
        #[cfg(feature = "vo_bit")]
        println!(
            "vo bit = {}",
            crate::util::metadata::vo_bit::is_vo_bit_set(object)
        );
        if self.needs_log_bit {
            use crate::vm::object_model::ObjectModel;
            use std::sync::atomic::Ordering;
            println!(
                "log bit = {}",
                VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.is_unlogged::<VM>(object, Ordering::Relaxed),
            );
        }
        println!("is live = {}", object.is_live());
    }
}

/// Compute the extent for a fractional space request: the requested fraction of the available
/// address range, rounded down to whole megabytes and then aligned up to the chunk size.
fn get_frac_available(frac: f32) -> usize {
    trace!("AVAILABLE_START={}", vm_layout().available_start());
    trace!("AVAILABLE_END={}", vm_layout().available_end());
    let bytes = (frac * vm_layout().available_bytes() as f32) as usize;
    trace!("bytes={}*{}={}", frac, vm_layout().available_bytes(), bytes);
    let mb = bytes >> LOG_BYTES_IN_MBYTE;
    let rtn = mb << LOG_BYTES_IN_MBYTE;
    trace!("rtn={}", rtn);
    let aligned_rtn = raw_align_up(rtn, BYTES_IN_CHUNK);
    trace!("aligned_rtn={}", aligned_rtn);
    aligned_rtn
}

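/// The number of chunks required to hold the given number of pages, i.e. the page count
/// converted to bytes and rounded up to whole chunks. For example, with 4 MiB chunks and 4 KiB
/// pages, `required_chunks(1)` is 1.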
pub fn required_chunks(pages: usize) -> usize {
    let extent = raw_align_up(pages_to_bytes(pages), BYTES_IN_CHUNK);
    extent >> LOG_BYTES_IN_CHUNK
}