use crate::global_state::GlobalState;
use crate::plan::PlanConstraints;
use crate::scheduler::GCWorkScheduler;
use crate::util::conversions::*;
use crate::util::metadata::side_metadata::{
    SideMetadataContext, SideMetadataSanity, SideMetadataSpec,
};
use crate::util::object_enum::ObjectEnumerator;
use crate::util::Address;
use crate::util::ObjectReference;

use crate::util::heap::layout::vm_layout::{vm_layout, LOG_BYTES_IN_CHUNK};
use crate::util::heap::{PageResource, VMRequest};
use crate::util::options::Options;
use crate::vm::{ActivePlan, Collection};

use crate::util::constants::{LOG_BYTES_IN_MBYTE, LOG_BYTES_IN_PAGE};
use crate::util::conversions;
use crate::util::opaque_pointer::*;

use crate::mmtk::SFT_MAP;
#[cfg(debug_assertions)]
use crate::policy::sft::EMPTY_SFT_NAME;
use crate::policy::sft::SFT;
use crate::util::alloc::allocator::AllocationOptions;
use crate::util::copy::*;
use crate::util::heap::gc_trigger::GCTrigger;
use crate::util::heap::layout::vm_layout::BYTES_IN_CHUNK;
use crate::util::heap::layout::Mmapper;
use crate::util::heap::layout::VMMap;
use crate::util::heap::space_descriptor::SpaceDescriptor;
use crate::util::heap::HeapMeta;
use crate::util::os::*;
use crate::vm::VMBinding;

use std::marker::PhantomData;
use std::sync::atomic::AtomicBool;
use std::sync::Arc;
use std::sync::Mutex;

use downcast_rs::Downcast;

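/// A `Space` is a region of virtual memory managed under a single policy. It ties together a
/// page resource (which hands out pages of memory), the space's side metadata, and the
/// per-space behaviour exposed through the [`SFT`] trait.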
pub trait Space<VM: VMBinding>: 'static + SFT + Sync + Downcast {
    fn as_space(&self) -> &dyn Space<VM>;
    fn as_sft(&self) -> &(dyn SFT + Sync + 'static);
    fn get_page_resource(&self) -> &dyn PageResource<VM>;

    fn maybe_get_page_resource_mut(&mut self) -> Option<&mut dyn PageResource<VM>>;

    fn initialize_sft(&self, sft_map: &mut dyn crate::policy::sft_map::SFTMap);

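    /// Return `true` if an allocation request of `size` bytes can never succeed because it is
    /// larger than the maximum heap size by itself.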
    fn will_oom_on_acquire(&self, size: usize) -> bool {
        let max_pages = self.get_gc_trigger().policy.get_max_heap_size_in_pages();
        let requested_pages = size >> LOG_BYTES_IN_PAGE;
        requested_pages > max_pages
    }

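    /// Check for an obviously impossible allocation request and, if the allocation options
    /// permit it, report `HeapOutOfMemory` to the VM. Returns `true` if the request was an
    /// obvious OOM (whether or not the VM was notified), and `false` otherwise.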
    fn handle_obvious_oom_request(
        &self,
        tls: VMThread,
        size: usize,
        alloc_options: AllocationOptions,
    ) -> bool {
        if self.will_oom_on_acquire(size) {
            if alloc_options.allow_oom_call {
                VM::VMCollection::out_of_memory(
                    tls,
                    crate::util::alloc::AllocationError::HeapOutOfMemory,
                );
            }
            return true;
        }
        false
    }

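    /// Acquire `pages` pages from this space's page resource. Mutators poll the GC trigger
    /// first; if a GC is triggered (and overcommit is not allowed), or the page resource cannot
    /// supply the pages, this returns `Address::ZERO` after going through `not_acquiring`.
    ///
    /// Illustrative sketch only (not taken from any particular allocator): callers are expected
    /// to rule out obvious OOMs before acquiring, e.g.
    /// ```ignore
    /// if space.handle_obvious_oom_request(tls, bytes, alloc_options) {
    ///     return Address::ZERO;
    /// }
    /// let result = space.acquire(tls, bytes >> LOG_BYTES_IN_PAGE, alloc_options);
    /// ```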
    fn acquire(&self, tls: VMThread, pages: usize, alloc_options: AllocationOptions) -> Address {
        trace!(
            "Space.acquire, tls={:?}, alloc_options={:?}",
            tls,
            alloc_options
        );

        debug_assert!(
            !self.will_oom_on_acquire(pages << LOG_BYTES_IN_PAGE),
            "The requested pages exceed the max heap size. Is will_oom_on_acquire checked before acquiring memory?"
        );

        trace!("Reserving pages");
        let pr = self.get_page_resource();
        let pages_reserved = pr.reserve_pages(pages);
        trace!("Pages reserved");

        // Only mutator threads poll the GC trigger before acquiring pages.
        let should_poll = VM::VMActivePlan::is_mutator(tls);

        let gc_triggered = should_poll && {
            trace!("Polling ..");
            self.get_gc_trigger().poll(false, Some(self.as_space()))
        };

        let should_get_pages = !gc_triggered || alloc_options.allow_overcommit;

        if should_get_pages {
            if let Some(addr) = self.get_new_pages_and_initialize(tls, pages, pr, pages_reserved) {
                addr
            } else {
                self.not_acquiring(tls, alloc_options, pr, pages_reserved, true);
                Address::ZERO
            }
        } else {
            self.not_acquiring(tls, alloc_options, pr, pages_reserved, false);
            Address::ZERO
        }
    }

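    /// Get new pages from the page resource and make them usable: mmap the memory and its side
    /// metadata, update the SFT, and zero the new region if the space requires zeroed memory.
    /// Returns `None` if the page resource cannot supply the pages.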
    fn get_new_pages_and_initialize(
        &self,
        tls: VMThread,
        pages: usize,
        pr: &dyn PageResource<VM>,
        pages_reserved: usize,
    ) -> Option<Address> {
        // Serialize concurrent page acquisitions for this space.
        let lock = self.common().acquire_lock.lock().unwrap();

        let Ok(res) = pr.get_new_pages(self.common().descriptor, pages_reserved, pages, tls) else {
            return None;
        };

        debug!(
            "Got new pages {} ({} pages) for {} in chunk {}, new_chunk? {}",
            res.start,
            res.pages,
            self.get_name(),
            conversions::chunk_align_down(res.start),
            res.new_chunk
        );
        let bytes = conversions::pages_to_bytes(res.pages);

        let mmap = || {
            if let Err(mmap_error) = self
                .common()
                .mmapper
                .ensure_mapped(
                    res.start,
                    res.pages,
                    if *self.common().options.transparent_hugepages {
                        HugePageSupport::TransparentHugePages
                    } else {
                        HugePageSupport::No
                    },
                    self.common().mmap_protection(),
                    &MmapAnnotation::Space {
                        name: self.get_name(),
                    },
                )
                .and(self.common().metadata.try_map_metadata_space(
                    res.start,
                    bytes,
                    self.get_name(),
                ))
            {
                OS::handle_mmap_error::<VM>(mmap_error, tls);
            }
        };
        let grow_space = || {
            self.grow_space(res.start, bytes, res.new_chunk);
        };

        // The order of mapping the memory and growing the space (updating the SFT) depends on
        // whether the SFT map is backed by side metadata: when it is, the memory (including the
        // side metadata) is mapped before the SFT entries are written.
        if SFT_MAP.get_side_metadata().is_some() {
            mmap();
            grow_space();
            drop(lock);
        } else {
            grow_space();
            drop(lock);
            mmap();
        }

        if self.common().zeroed {
            crate::util::memory::zero(res.start, bytes);
        }

        // Sanity-check that both ends of the new region are registered to this space.
        {
            debug_assert_eq!(SFT_MAP.get_checked(res.start).name(), self.get_name());
            debug_assert!(self.address_in_space(res.start));
            debug_assert_eq!(
                self.common().vm_map().get_descriptor_for_address(res.start),
                self.common().descriptor
            );

            let last_byte = res.start + bytes - 1;
            debug_assert_eq!(SFT_MAP.get_checked(last_byte).name(), self.get_name());
            debug_assert!(self.address_in_space(last_byte));
            debug_assert_eq!(
                self.common().vm_map().get_descriptor_for_address(last_byte),
                self.common().descriptor
            );
        }

        debug!("Space.acquire(), returned = {}", res.start);
        Some(res.start)
    }

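    /// Called when `acquire` will not return memory: clears the page reservation, and, for a
    /// mutator at a safepoint, forces a GC if the allocation actually failed, reports the
    /// pending allocation to the GC trigger, and blocks the thread for GC.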
    fn not_acquiring(
        &self,
        tls: VMThread,
        alloc_options: AllocationOptions,
        pr: &dyn PageResource<VM>,
        pages_reserved: usize,
        attempted_allocation_and_failed: bool,
    ) {
        assert!(
            VM::VMActivePlan::is_mutator(tls),
            "A non-mutator thread failed to get pages from the page resource. \
             Copying GC plans should compute the copying headroom carefully to prevent this."
        );

        pr.clear_request(pages_reserved);

        // If the caller is not at a safepoint, it cannot block for GC; just return.
        if !alloc_options.at_safepoint {
            return;
        }

        debug!("Collection required");

        if !self.common().global_state.is_initialized() {
            panic!(
                "GC is not allowed here: collection is not initialized \
                 (did you call initialize_collection()?). \
                 Out of physical memory: {phy}",
                phy = attempted_allocation_and_failed
            );
        }

        if attempted_allocation_and_failed {
            // The allocation already failed, so force a GC rather than merely polling.
            let gc_performed = self.get_gc_trigger().poll(true, Some(self.as_space()));
            debug_assert!(gc_performed, "GC not performed when forced.");
        }

        // Account for the side metadata of the pending allocation as well.
        let meta_pages_reserved = self.estimate_side_meta_pages(pages_reserved);
        let total_pages_reserved = pages_reserved + meta_pages_reserved;
        self.get_gc_trigger()
            .policy
            .on_pending_allocation(total_pages_reserved);

        VM::VMCollection::block_for_gc(VMMutatorThread(tls));
    }

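    /// Return `true` if `start` falls inside this space: a descriptor lookup through the VM map
    /// for discontiguous spaces, or a simple range check for contiguous spaces.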
    fn address_in_space(&self, start: Address) -> bool {
        if !self.common().descriptor.is_contiguous() {
            self.common().vm_map().get_descriptor_for_address(start) == self.common().descriptor
        } else {
            start >= self.common().start && start < self.common().start + self.common().extent
        }
    }

    fn in_space(&self, object: ObjectReference) -> bool {
        self.address_in_space(object.to_raw_address())
    }

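    /// Note that the space has grown to cover `[start, start + bytes)`. If this adds a new
    /// chunk, the SFT entries for the range are updated; otherwise the entries must already
    /// exist (checked in debug builds).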
    fn grow_space(&self, start: Address, bytes: usize, new_chunk: bool) {
        trace!(
            "Grow space from {} for {} bytes (new chunk = {})",
            start,
            bytes,
            new_chunk
        );

        #[cfg(debug_assertions)]
        if !new_chunk {
            debug_assert!(
                SFT_MAP.get_checked(start).name() != EMPTY_SFT_NAME,
                "In grow_space(start = {}, bytes = {}, new_chunk = {}), we have empty SFT entries (chunk for {} = {})",
                start,
                bytes,
                new_chunk,
                start,
                SFT_MAP.get_checked(start).name()
            );
            debug_assert!(
                SFT_MAP.get_checked(start + bytes - 1).name() != EMPTY_SFT_NAME,
                "In grow_space(start = {}, bytes = {}, new_chunk = {}), we have empty SFT entries (chunk for {} = {})",
                start,
                bytes,
                new_chunk,
                start + bytes - 1,
                SFT_MAP.get_checked(start + bytes - 1).name()
            );
        }

        if new_chunk {
            unsafe { SFT_MAP.update(self.as_sft(), start, bytes) };
        }
    }

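    /// Estimate how many pages of side metadata are required for the given number of data pages.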
    fn estimate_side_meta_pages(&self, data_pages: usize) -> usize {
        self.common().metadata.calculate_reserved_pages(data_pages)
    }

    fn reserved_pages(&self) -> usize {
        let data_pages = self.get_page_resource().reserved_pages();
        let meta_pages = self.estimate_side_meta_pages(data_pages);
        data_pages + meta_pages
    }

    fn available_physical_pages(&self) -> usize {
        self.get_page_resource().get_available_physical_pages()
    }

    fn get_name(&self) -> &'static str {
        self.common().name
    }

    fn get_descriptor(&self) -> SpaceDescriptor {
        self.common().descriptor
    }

    fn common(&self) -> &CommonSpace<VM>;
    fn get_gc_trigger(&self) -> &GCTrigger<VM> {
        self.common().gc_trigger.as_ref()
    }

    fn release_multiple_pages(&mut self, start: Address);

    fn set_copy_for_sft_trace(&mut self, _semantics: Option<CopySemantics>) {
        panic!("A copying space should override this method")
    }

    fn verify_side_metadata_sanity(&self, side_metadata_sanity_checker: &mut SideMetadataSanity) {
        side_metadata_sanity_checker
            .verify_metadata_context(std::any::type_name::<Self>(), &self.common().metadata)
    }

    fn enumerate_objects(&self, enumerator: &mut dyn ObjectEnumerator);

    fn set_allocate_as_live(&self, live: bool) {
        self.common()
            .allocate_as_live
            .store(live, std::sync::atomic::Ordering::SeqCst);
    }

    fn should_allocate_as_live(&self) -> bool {
        self.common()
            .allocate_as_live
            .load(std::sync::atomic::Ordering::Acquire)
    }

    fn clear_side_log_bits(&self);

    fn set_side_log_bits(&self);
}

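/// Write a one-line summary of a space in the VM-map debug format: the space name, its
/// immortal ("I") and non-movable ("N") flags, and the address range(s) it occupies.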
#[allow(unused)]
pub(crate) fn print_vm_map<VM: VMBinding>(
    space: &dyn Space<VM>,
    out: &mut impl std::fmt::Write,
) -> Result<(), std::fmt::Error> {
    let common = space.common();
    write!(out, "{} ", common.name)?;
    if common.immortal {
        write!(out, "I")?;
    } else {
        write!(out, " ")?;
    }
    if common.movable {
        write!(out, " ")?;
    } else {
        write!(out, "N")?;
    }
    write!(out, " ")?;
    if common.contiguous {
        write!(
            out,
            "{}->{}",
            common.start,
            common.start + common.extent - 1
        )?;
        match common.vmrequest {
            VMRequest::Extent { extent, .. } => {
                write!(out, " E {}", extent)?;
            }
            VMRequest::Fraction { frac, .. } => {
                write!(out, " F {}", frac)?;
            }
            _ => {}
        }
    } else {
        let mut a = space
            .get_page_resource()
            .common()
            .get_head_discontiguous_region();
        while !a.is_zero() {
            write!(
                out,
                "{}->{}",
                a,
                a + space.common().vm_map().get_contiguous_region_size(a) - 1
            )?;
            a = space.common().vm_map().get_next_contiguous_region(a);
            if !a.is_zero() {
                write!(out, " ")?;
            }
        }
    }
    writeln!(out)?;

    Ok(())
}

impl_downcast!(Space<VM> where VM: VMBinding);

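/// Fields shared by all space implementations: the space's name and descriptor, its address
/// range (for contiguous spaces), its side metadata context, and handles to global structures
/// such as the GC trigger, the global state, and the options.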
pub struct CommonSpace<VM: VMBinding> {
    pub name: &'static str,
    pub descriptor: SpaceDescriptor,
    pub vmrequest: VMRequest,

    pub copy: Option<CopySemantics>,

    pub immortal: bool,
    pub movable: bool,
    pub contiguous: bool,
    pub zeroed: bool,

    pub permission_exec: bool,

    pub start: Address,
    pub extent: usize,

    pub vm_map: &'static dyn VMMap,
    pub mmapper: &'static dyn Mmapper,

    pub(crate) metadata: SideMetadataContext,

    pub needs_log_bit: bool,
    pub unlog_allocated_object: bool,
    pub unlog_traced_object: bool,

    pub acquire_lock: Mutex<()>,

    pub gc_trigger: Arc<GCTrigger<VM>>,
    pub global_state: Arc<GlobalState>,
    pub options: Arc<Options>,

    pub allocate_as_live: AtomicBool,

    p: PhantomData<VM>,
}

pub struct PolicyCreateSpaceArgs<'a, VM: VMBinding> {
    pub plan_args: PlanCreateSpaceArgs<'a, VM>,
    pub movable: bool,
    pub immortal: bool,
    pub local_side_metadata_specs: Vec<SideMetadataSpec>,
}

pub struct PlanCreateSpaceArgs<'a, VM: VMBinding> {
    pub name: &'static str,
    pub zeroed: bool,
    pub permission_exec: bool,
    pub unlog_allocated_object: bool,
    pub unlog_traced_object: bool,
    pub vmrequest: VMRequest,
    pub global_side_metadata_specs: Vec<SideMetadataSpec>,
    pub vm_map: &'static dyn VMMap,
    pub mmapper: &'static dyn Mmapper,
    pub heap: &'a mut HeapMeta,
    pub constraints: &'a PlanConstraints,
    pub gc_trigger: Arc<GCTrigger<VM>>,
    pub scheduler: Arc<GCWorkScheduler<VM>>,
    pub options: Arc<Options>,
    pub global_state: Arc<GlobalState>,
}

impl<'a, VM: VMBinding> PlanCreateSpaceArgs<'a, VM> {
    pub fn into_policy_args(
        self,
        movable: bool,
        immortal: bool,
        policy_metadata_specs: Vec<SideMetadataSpec>,
    ) -> PolicyCreateSpaceArgs<'a, VM> {
        PolicyCreateSpaceArgs {
            movable,
            immortal,
            local_side_metadata_specs: policy_metadata_specs,
            plan_args: self,
        }
    }
}

impl<VM: VMBinding> CommonSpace<VM> {
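    /// Create the common part of a space from the policy-level arguments. Discontiguous spaces
    /// return early with no fixed address range; contiguous spaces reserve their address range,
    /// register it with the VM map, and map the side metadata for the range.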
    pub fn new(args: PolicyCreateSpaceArgs<VM>) -> Self {
        let mut rtn = CommonSpace {
            name: args.plan_args.name,
            descriptor: SpaceDescriptor::UNINITIALIZED,
            vmrequest: args.plan_args.vmrequest,
            copy: None,
            immortal: args.immortal,
            movable: args.movable,
            contiguous: true,
            permission_exec: args.plan_args.permission_exec,
            zeroed: args.plan_args.zeroed,
            start: unsafe { Address::zero() },
            extent: 0,
            vm_map: args.plan_args.vm_map,
            mmapper: args.plan_args.mmapper,
            needs_log_bit: args.plan_args.constraints.needs_log_bit,
            unlog_allocated_object: args.plan_args.unlog_allocated_object,
            unlog_traced_object: args.plan_args.unlog_traced_object,
            gc_trigger: args.plan_args.gc_trigger,
            metadata: SideMetadataContext {
                global: args.plan_args.global_side_metadata_specs,
                local: args.local_side_metadata_specs,
            },
            acquire_lock: Mutex::new(()),
            global_state: args.plan_args.global_state,
            options: args.plan_args.options.clone(),
            allocate_as_live: AtomicBool::new(false),
            p: PhantomData,
        };

        let vmrequest = args.plan_args.vmrequest;
        if vmrequest.is_discontiguous() {
            rtn.contiguous = false;
            rtn.descriptor = SpaceDescriptor::create_descriptor();
            return rtn;
        }

        let (extent, top) = match vmrequest {
            VMRequest::Fraction { frac, top: _top } => (get_frac_available(frac), _top),
            VMRequest::Extent {
                extent: _extent,
                top: _top,
            } => (_extent, _top),
            VMRequest::Fixed {
                extent: _extent, ..
            } => (_extent, false),
            _ => unreachable!(),
        };

        assert!(
            extent == raw_align_up(extent, BYTES_IN_CHUNK),
            "{} requested non-aligned extent: {} bytes",
            rtn.name,
            extent
        );

        let start = if let VMRequest::Fixed { start: _start, .. } = vmrequest {
            _start
        } else {
            args.plan_args.heap.reserve(extent, top)
        };
        assert!(
            start == chunk_align_up(start),
            "{} starting on non-aligned boundary: {}",
            rtn.name,
            start
        );

        rtn.contiguous = true;
        rtn.start = start;
        rtn.extent = extent;
        rtn.descriptor = SpaceDescriptor::create_descriptor_from_heap_range(start, start + extent);

        // Register the part of the range that overlaps the available address range with the VM map.
        {
            use crate::util::heap::layout;
            let overlap =
                Address::range_intersection(&(start..start + extent), &layout::available_range());
            if !overlap.is_empty() {
                args.plan_args.vm_map.insert(
                    overlap.start,
                    overlap.end - overlap.start,
                    rtn.descriptor,
                );
            }
        }

        // Map the side metadata for this space's address range; failure to do so is fatal.
        rtn.metadata
            .try_map_metadata_address_range(rtn.start, rtn.extent, rtn.name)
            .unwrap_or_else(|e| {
                panic!("failed to mmap meta memory: {e}");
            });

        debug!(
            "Created space {} [{}, {}) for {} bytes",
            rtn.name,
            start,
            start + extent,
            extent
        );

        rtn
    }

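    /// Initialize the SFT entries for this space. Only contiguous spaces are initialized
    /// eagerly here; discontiguous spaces update the SFT as chunks are acquired (see
    /// `Space::grow_space`).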
    pub fn initialize_sft(
        &self,
        sft: &(dyn SFT + Sync + 'static),
        sft_map: &mut dyn crate::policy::sft_map::SFTMap,
    ) {
        if self.contiguous {
            unsafe { sft_map.eager_initialize(sft, self.start, self.extent) };
        }
    }

    pub fn vm_map(&self) -> &'static dyn VMMap {
        self.vm_map
    }

    pub fn mmap_protection(&self) -> MmapProtection {
        if self.permission_exec || cfg!(feature = "exec_permission_on_all_spaces") {
            MmapProtection::ReadWriteExec
        } else {
            MmapProtection::ReadWrite
        }
    }

    pub(crate) fn debug_print_object_global_info(&self, object: ObjectReference) {
        #[cfg(feature = "vo_bit")]
        println!(
            "vo bit = {}",
            crate::util::metadata::vo_bit::is_vo_bit_set(object)
        );
        if self.needs_log_bit {
            use crate::vm::object_model::ObjectModel;
            use std::sync::atomic::Ordering;
            println!(
                "log bit = {}",
                VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.is_unlogged::<VM>(object, Ordering::Relaxed),
            );
        }
        println!("is live = {}", object.is_live());
    }
}

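/// Convert a fraction of the available virtual address range into a byte extent, truncated to
/// whole megabytes and then aligned up to the chunk size.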
fn get_frac_available(frac: f32) -> usize {
    trace!("AVAILABLE_START={}", vm_layout().available_start());
    trace!("AVAILABLE_END={}", vm_layout().available_end());
    let bytes = (frac * vm_layout().available_bytes() as f32) as usize;
    trace!("bytes={}*{}={}", frac, vm_layout().available_bytes(), bytes);
    let mb = bytes >> LOG_BYTES_IN_MBYTE;
    let rtn = mb << LOG_BYTES_IN_MBYTE;
    trace!("rtn={}", rtn);
    let aligned_rtn = raw_align_up(rtn, BYTES_IN_CHUNK);
    trace!("aligned_rtn={}", aligned_rtn);
    aligned_rtn
}

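/// The number of chunks required to hold the given number of pages, rounded up to whole chunks.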
pub fn required_chunks(pages: usize) -> usize {
    let extent = raw_align_up(pages_to_bytes(pages), BYTES_IN_CHUNK);
    extent >> LOG_BYTES_IN_CHUNK
}