1use crate::global_state::GlobalState;
2use crate::plan::PlanConstraints;
3use crate::scheduler::GCWorkScheduler;
4use crate::util::conversions::*;
5use crate::util::metadata::side_metadata::{
6 SideMetadataContext, SideMetadataSanity, SideMetadataSpec,
7};
8use crate::util::object_enum::ObjectEnumerator;
9use crate::util::Address;
10use crate::util::ObjectReference;
11
12use crate::util::heap::layout::vm_layout::{vm_layout, LOG_BYTES_IN_CHUNK};
13use crate::util::heap::{PageResource, VMRequest};
14use crate::util::options::Options;
15use crate::vm::{ActivePlan, Collection};
16
17use crate::util::constants::LOG_BYTES_IN_MBYTE;
18use crate::util::conversions;
19use crate::util::opaque_pointer::*;
20
21use crate::mmtk::SFT_MAP;
22#[cfg(debug_assertions)]
23use crate::policy::sft::EMPTY_SFT_NAME;
24use crate::policy::sft::SFT;
25use crate::util::alloc::allocator::AllocationOptions;
26use crate::util::copy::*;
27use crate::util::heap::gc_trigger::GCTrigger;
28use crate::util::heap::layout::vm_layout::BYTES_IN_CHUNK;
29use crate::util::heap::layout::Mmapper;
30use crate::util::heap::layout::VMMap;
31use crate::util::heap::space_descriptor::SpaceDescriptor;
32use crate::util::heap::HeapMeta;
33use crate::util::os::*;
34use crate::vm::VMBinding;
35
36use std::marker::PhantomData;
37use std::sync::atomic::AtomicBool;
38use std::sync::Arc;
39use std::sync::Mutex;
40
41use downcast_rs::Downcast;
42
43pub trait Space<VM: VMBinding>: 'static + SFT + Sync + Downcast {
44 fn as_space(&self) -> &dyn Space<VM>;
45 fn as_sft(&self) -> &(dyn SFT + Sync + 'static);
46 fn get_page_resource(&self) -> &dyn PageResource<VM>;
47
48 fn maybe_get_page_resource_mut(&mut self) -> Option<&mut dyn PageResource<VM>>;
51
52 fn initialize_sft(&self, sft_map: &mut dyn crate::policy::sft_map::SFTMap);
56
57 fn acquire(&self, tls: VMThread, pages: usize, alloc_options: AllocationOptions) -> Address {
58 trace!(
59 "Space.acquire, tls={:?}, alloc_options={:?}",
60 tls,
61 alloc_options
62 );
63
64 debug_assert!(
65 !self.get_gc_trigger().will_oom_on_alloc(pages << crate::util::constants::LOG_BYTES_IN_PAGE),
66 "The requested pages is larger than the max heap size. Is will_go_oom_on_acquire used before acquring memory?"
67 );
68
69 trace!("Reserving pages");
70 let pr = self.get_page_resource();
71 let pages_reserved = pr.reserve_pages(pages);
72 trace!("Pages reserved");
73
74 let should_poll = VM::VMActivePlan::is_mutator(tls);
77
78 let gc_triggered = should_poll && {
81 trace!("Polling ..");
82 self.get_gc_trigger().poll(false, Some(self.as_space()))
83 };
84
85 let should_get_pages = !gc_triggered || alloc_options.allow_overcommit;
89
90 if should_get_pages {
94 if let Some(addr) = self.get_new_pages_and_initialize(tls, pages, pr, pages_reserved) {
95 addr
96 } else {
97 self.not_acquiring(tls, alloc_options, pr, pages_reserved, true);
98 Address::ZERO
99 }
100 } else {
101 self.not_acquiring(tls, alloc_options, pr, pages_reserved, false);
102 Address::ZERO
103 }
104 }
105
106 fn get_new_pages_and_initialize(
115 &self,
116 tls: VMThread,
117 pages: usize,
118 pr: &dyn PageResource<VM>,
119 pages_reserved: usize,
120 ) -> Option<Address> {
121 let lock = self.common().acquire_lock.lock().unwrap();
128
129 let Ok(res) = pr.get_new_pages(self.common().descriptor, pages_reserved, pages, tls) else {
130 return None;
131 };
132
133 debug!(
134 "Got new pages {} ({} pages) for {} in chunk {}, new_chunk? {}",
135 res.start,
136 res.pages,
137 self.get_name(),
138 conversions::chunk_align_down(res.start),
139 res.new_chunk
140 );
141 let bytes = conversions::pages_to_bytes(res.pages);
142
143 let mmap = || {
144 if let Err(mmap_error) = self
147 .common()
148 .mmapper
149 .ensure_mapped(
150 res.start,
151 res.pages,
152 if *self.common().options.transparent_hugepages {
153 HugePageSupport::TransparentHugePages
154 } else {
155 HugePageSupport::No
156 },
157 self.common().mmap_protection(),
158 &MmapAnnotation::Space {
159 name: self.get_name(),
160 },
161 )
162 .and(self.common().metadata.try_map_metadata_space(
163 res.start,
164 bytes,
165 self.get_name(),
166 ))
167 {
168 OS::handle_mmap_error::<VM>(mmap_error, tls);
169 }
170 };
171 let grow_space = || {
172 self.grow_space(res.start, bytes, res.new_chunk);
173 };
174
175 if SFT_MAP.get_side_metadata().is_some() {
177 mmap();
179 grow_space();
181 drop(lock);
183 } else {
184 grow_space();
186 drop(lock);
187 mmap();
189 }
190
191 if self.common().zeroed {
193 crate::util::memory::zero(res.start, bytes);
194 }
195
196 {
198 debug_assert_eq!(SFT_MAP.get_checked(res.start).name(), self.get_name());
201 debug_assert!(self.address_in_space(res.start));
203 debug_assert_eq!(
205 self.common().vm_map().get_descriptor_for_address(res.start),
206 self.common().descriptor
207 );
208
209 let last_byte = res.start + bytes - 1;
211 debug_assert_eq!(SFT_MAP.get_checked(last_byte).name(), self.get_name());
213 debug_assert!(self.address_in_space(last_byte));
215 debug_assert_eq!(
217 self.common().vm_map().get_descriptor_for_address(last_byte),
218 self.common().descriptor
219 );
220 }
221
222 debug!("Space.acquire(), returned = {}", res.start);
223 Some(res.start)
224 }
225
226 fn not_acquiring(
232 &self,
233 tls: VMThread,
234 alloc_options: AllocationOptions,
235 pr: &dyn PageResource<VM>,
236 pages_reserved: usize,
237 attempted_allocation_and_failed: bool,
238 ) {
239 assert!(
240 VM::VMActivePlan::is_mutator(tls),
241 "A non-mutator thread failed to get pages from page resource. \
242 Copying GC plans should compute the copying headroom carefully to prevent this."
243 );
244
245 pr.clear_request(pages_reserved);
247
248 if !alloc_options.at_safepoint {
250 return;
251 }
252
253 debug!("Collection required");
254
255 if !self.common().global_state.is_initialized() {
256 panic!(
258 "GC is not allowed here: collection is not initialized \
259 (did you call initialize_collection()?). \
260 Out of physical memory: {phy}",
261 phy = attempted_allocation_and_failed
262 );
263 }
264
265 if attempted_allocation_and_failed {
266 let gc_performed = self.get_gc_trigger().poll(true, Some(self.as_space()));
268 debug_assert!(gc_performed, "GC not performed when forced.");
269 }
270
271 let meta_pages_reserved = self.estimate_side_meta_pages(pages_reserved);
273 let total_pages_reserved = pages_reserved + meta_pages_reserved;
274 self.get_gc_trigger()
275 .policy
276 .on_pending_allocation(total_pages_reserved);
277
278 VM::VMCollection::block_for_gc(VMMutatorThread(tls)); }
280
281 fn address_in_space(&self, start: Address) -> bool {
282 if !self.common().descriptor.is_contiguous() {
283 self.common().vm_map().get_descriptor_for_address(start) == self.common().descriptor
284 } else {
285 start >= self.common().start && start < self.common().start + self.common().extent
286 }
287 }
288
289 fn in_space(&self, object: ObjectReference) -> bool {
290 self.address_in_space(object.to_raw_address())
291 }
292
293 fn grow_space(&self, start: Address, bytes: usize, new_chunk: bool) {
303 trace!(
304 "Grow space from {} for {} bytes (new chunk = {})",
305 start,
306 bytes,
307 new_chunk
308 );
309
310 #[cfg(debug_assertions)]
312 if !new_chunk {
313 debug_assert!(
314 SFT_MAP.get_checked(start).name() != EMPTY_SFT_NAME,
315 "In grow_space(start = {}, bytes = {}, new_chunk = {}), we have empty SFT entries (chunk for {} = {})",
316 start,
317 bytes,
318 new_chunk,
319 start,
320 SFT_MAP.get_checked(start).name()
321 );
322 debug_assert!(
323 SFT_MAP.get_checked(start + bytes - 1).name() != EMPTY_SFT_NAME,
324 "In grow_space(start = {}, bytes = {}, new_chunk = {}), we have empty SFT entries (chunk for {} = {})",
325 start,
326 bytes,
327 new_chunk,
328 start + bytes - 1,
329 SFT_MAP.get_checked(start + bytes - 1).name()
330 );
331 }
332
333 if new_chunk {
334 unsafe { SFT_MAP.update(self.as_sft(), start, bytes) };
335 }
336 }
337
338 fn estimate_side_meta_pages(&self, data_pages: usize) -> usize {
346 self.common().metadata.calculate_reserved_pages(data_pages)
347 }
348
349 fn reserved_pages(&self) -> usize {
350 let data_pages = self.get_page_resource().reserved_pages();
351 let meta_pages = self.estimate_side_meta_pages(data_pages);
352 data_pages + meta_pages
353 }
354
355 fn available_physical_pages(&self) -> usize {
357 self.get_page_resource().get_available_physical_pages()
358 }
359
360 fn get_name(&self) -> &'static str {
361 self.common().name
362 }
363
364 fn get_descriptor(&self) -> SpaceDescriptor {
365 self.common().descriptor
366 }
367
368 fn common(&self) -> &CommonSpace<VM>;
369 fn get_gc_trigger(&self) -> &GCTrigger<VM> {
370 self.common().gc_trigger.as_ref()
371 }
372
373 fn release_multiple_pages(&mut self, start: Address);
374
375 fn set_copy_for_sft_trace(&mut self, _semantics: Option<CopySemantics>) {
378 panic!("A copying space should override this method")
379 }
380
381 fn verify_side_metadata_sanity(&self, side_metadata_sanity_checker: &mut SideMetadataSanity) {
391 side_metadata_sanity_checker
392 .verify_metadata_context(std::any::type_name::<Self>(), &self.common().metadata)
393 }
394
395 fn enumerate_objects(&self, enumerator: &mut dyn ObjectEnumerator);
416
417 fn set_allocate_as_live(&self, live: bool) {
418 self.common()
419 .allocate_as_live
420 .store(live, std::sync::atomic::Ordering::SeqCst);
421 }
422
423 fn should_allocate_as_live(&self) -> bool {
424 self.common()
425 .allocate_as_live
426 .load(std::sync::atomic::Ordering::Acquire)
427 }
428
429 fn clear_side_log_bits(&self);
432
433 fn set_side_log_bits(&self);
436}
437
438#[allow(unused)]
442pub(crate) fn print_vm_map<VM: VMBinding>(
443 space: &dyn Space<VM>,
444 out: &mut impl std::fmt::Write,
445) -> Result<(), std::fmt::Error> {
446 let common = space.common();
447 write!(out, "{} ", common.name)?;
448 if common.immortal {
449 write!(out, "I")?;
450 } else {
451 write!(out, " ")?;
452 }
453 if common.movable {
454 write!(out, " ")?;
455 } else {
456 write!(out, "N")?;
457 }
458 write!(out, " ")?;
459 if common.contiguous {
460 write!(
461 out,
462 "{}->{}",
463 common.start,
464 common.start + common.extent - 1
465 )?;
466 match common.vmrequest {
467 VMRequest::Extent { extent, .. } => {
468 write!(out, " E {}", extent)?;
469 }
470 VMRequest::Fraction { frac, .. } => {
471 write!(out, " F {}", frac)?;
472 }
473 _ => {}
474 }
475 } else {
476 let mut a = space
477 .get_page_resource()
478 .common()
479 .get_head_discontiguous_region();
480 while !a.is_zero() {
481 write!(
482 out,
483 "{}->{}",
484 a,
485 a + space.common().vm_map().get_contiguous_region_size(a) - 1
486 )?;
487 a = space.common().vm_map().get_next_contiguous_region(a);
488 if !a.is_zero() {
489 write!(out, " ")?;
490 }
491 }
492 }
493 writeln!(out)?;
494
495 Ok(())
496}
497
498impl_downcast!(Space<VM> where VM: VMBinding);
499
/// State shared by all space implementations. Accessed through
/// `Space::common()`.
pub struct CommonSpace<VM: VMBinding> {
    /// Name of the space; used in logging, debug output, and assertions.
    pub name: &'static str,
    /// Descriptor identifying this space in the global VM map.
    pub descriptor: SpaceDescriptor,
    /// The virtual-memory request this space was created with.
    pub vmrequest: VMRequest,

    /// Copy semantics for a copying space; `None` for non-copying spaces.
    pub copy: Option<CopySemantics>,

    /// Whether the space is immortal (printed as `I` in the VM map dump).
    pub immortal: bool,
    /// Whether objects in this space may move (printed as `N` when false).
    pub movable: bool,
    /// Whether the space occupies a single contiguous address range.
    pub contiguous: bool,
    /// Whether newly acquired pages must be zeroed before use.
    pub zeroed: bool,

    /// When true, pages are mapped with execute permission
    /// (see `mmap_protection`).
    pub permission_exec: bool,

    /// Start address of the space. Only meaningful for contiguous spaces.
    pub start: Address,
    /// Extent (bytes) of the space. Only meaningful for contiguous spaces.
    pub extent: usize,

    /// The global VM map (address-range to space-descriptor mapping).
    pub vm_map: &'static dyn VMMap,
    /// The mmapper used to map this space's pages.
    pub mmapper: &'static dyn Mmapper,

    /// Side-metadata specs (global + policy-local) for this space.
    pub(crate) metadata: SideMetadataContext,

    /// Whether the plan requires the (un)log bit (from plan constraints).
    pub needs_log_bit: bool,
    // NOTE(review): the precise unlog semantics are defined by the plan —
    // these are copied verbatim from the plan's creation arguments.
    pub unlog_allocated_object: bool,
    pub unlog_traced_object: bool,

    /// Serializes page acquisition within this space
    /// (see `Space::get_new_pages_and_initialize`).
    pub acquire_lock: Mutex<()>,

    /// The GC trigger consulted/notified during allocation.
    pub gc_trigger: Arc<GCTrigger<VM>>,
    /// Shared global MMTk state (e.g. whether collection is initialized).
    pub global_state: Arc<GlobalState>,
    /// User-supplied options (e.g. transparent hugepages).
    pub options: Arc<Options>,

    /// When set, newly allocated objects are treated as live
    /// (see `Space::set_allocate_as_live`).
    pub allocate_as_live: AtomicBool,

    // Marker tying this struct to the VM binding type parameter.
    p: PhantomData<VM>,
}
541
/// Arguments for creating a space at the policy level: the plan-level
/// arguments plus policy-specific properties and metadata specs.
pub struct PolicyCreateSpaceArgs<'a, VM: VMBinding> {
    /// The plan-level creation arguments this policy extends.
    pub plan_args: PlanCreateSpaceArgs<'a, VM>,
    /// Whether objects in the new space may move.
    pub movable: bool,
    /// Whether the new space is immortal.
    pub immortal: bool,
    /// Policy-local side-metadata specs for the new space.
    pub local_side_metadata_specs: Vec<SideMetadataSpec>,
}
549
/// Arguments a plan supplies when creating a space.
pub struct PlanCreateSpaceArgs<'a, VM: VMBinding> {
    /// Name of the new space.
    pub name: &'static str,
    /// Whether newly acquired pages must be zeroed.
    pub zeroed: bool,
    /// Whether pages should be mapped with execute permission.
    pub permission_exec: bool,
    // NOTE(review): unlog semantics are plan-defined; these flags are
    // forwarded into `CommonSpace` unchanged.
    pub unlog_allocated_object: bool,
    pub unlog_traced_object: bool,
    /// The virtual-memory request (extent/fraction/fixed, contiguity).
    pub vmrequest: VMRequest,
    /// Global side-metadata specs shared by all spaces of the plan.
    pub global_side_metadata_specs: Vec<SideMetadataSpec>,
    /// The global VM map.
    pub vm_map: &'static dyn VMMap,
    /// The mmapper used to map the space's pages.
    pub mmapper: &'static dyn Mmapper,
    /// Heap metadata used to reserve the space's address range.
    pub heap: &'a mut HeapMeta,
    /// The plan's constraints (e.g. `needs_log_bit`).
    pub constraints: &'a PlanConstraints,
    /// The GC trigger shared with the plan.
    pub gc_trigger: Arc<GCTrigger<VM>>,
    /// The GC work scheduler.
    pub scheduler: Arc<GCWorkScheduler<VM>>,
    /// User-supplied options.
    pub options: Arc<Options>,
    /// Shared global MMTk state.
    pub global_state: Arc<GlobalState>,
}
568
569impl<'a, VM: VMBinding> PlanCreateSpaceArgs<'a, VM> {
570 pub fn into_policy_args(
572 self,
573 movable: bool,
574 immortal: bool,
575 policy_metadata_specs: Vec<SideMetadataSpec>,
576 ) -> PolicyCreateSpaceArgs<'a, VM> {
577 PolicyCreateSpaceArgs {
578 movable,
579 immortal,
580 local_side_metadata_specs: policy_metadata_specs,
581 plan_args: self,
582 }
583 }
584}
585
impl<VM: VMBinding> CommonSpace<VM> {
    /// Create the common state for a new space.
    ///
    /// Discontiguous spaces only receive a descriptor (chunks are obtained
    /// later). Contiguous spaces additionally compute their extent and
    /// start address, register the range with the VM map, and eagerly map
    /// side metadata for the whole range.
    pub fn new(args: PolicyCreateSpaceArgs<VM>) -> Self {
        let mut rtn = CommonSpace {
            name: args.plan_args.name,
            descriptor: SpaceDescriptor::UNINITIALIZED,
            vmrequest: args.plan_args.vmrequest,
            copy: None,
            immortal: args.immortal,
            movable: args.movable,
            contiguous: true,
            permission_exec: args.plan_args.permission_exec,
            zeroed: args.plan_args.zeroed,
            start: unsafe { Address::zero() },
            extent: 0,
            vm_map: args.plan_args.vm_map,
            mmapper: args.plan_args.mmapper,
            needs_log_bit: args.plan_args.constraints.needs_log_bit,
            unlog_allocated_object: args.plan_args.unlog_allocated_object,
            unlog_traced_object: args.plan_args.unlog_traced_object,
            gc_trigger: args.plan_args.gc_trigger,
            metadata: SideMetadataContext {
                global: args.plan_args.global_side_metadata_specs,
                local: args.local_side_metadata_specs,
            },
            acquire_lock: Mutex::new(()),
            global_state: args.plan_args.global_state,
            options: args.plan_args.options.clone(),
            allocate_as_live: AtomicBool::new(false),
            p: PhantomData,
        };

        let vmrequest = args.plan_args.vmrequest;
        if vmrequest.is_discontiguous() {
            // Discontiguous spaces only need a descriptor here; their
            // chunks are mapped into the VM map lazily as they are acquired.
            rtn.contiguous = false;
            rtn.descriptor = SpaceDescriptor::create_descriptor();
            return rtn;
        }

        // Determine the extent (bytes) and whether the space should be
        // placed at the top of the heap, from the VM request kind.
        let (extent, top) = match vmrequest {
            VMRequest::Fraction { frac, top: _top } => (get_frac_available(frac), _top),
            VMRequest::Extent {
                extent: _extent,
                top: _top,
            } => (_extent, _top),
            VMRequest::Fixed {
                extent: _extent, ..
            } => (_extent, false),
            _ => unreachable!(),
        };

        assert!(
            extent == raw_align_up(extent, BYTES_IN_CHUNK),
            "{} requested non-aligned extent: {} bytes",
            rtn.name,
            extent
        );

        // Fixed requests dictate the start address; otherwise reserve a
        // range from the heap metadata.
        let start = if let VMRequest::Fixed { start: _start, .. } = vmrequest {
            _start
        } else {
            args.plan_args.heap.reserve(extent, top)
        };
        assert!(
            start == chunk_align_up(start),
            "{} starting on non-aligned boundary: {}",
            rtn.name,
            start
        );

        rtn.contiguous = true;
        rtn.start = start;
        rtn.extent = extent;
        rtn.descriptor = SpaceDescriptor::create_descriptor_from_heap_range(start, start + extent);
        // Register with the VM map only the part of the range that overlaps
        // the layout's available range.
        {
            use crate::util::heap::layout;
            let overlap =
                Address::range_intersection(&(start..start + extent), &layout::available_range());
            if !overlap.is_empty() {
                args.plan_args.vm_map.insert(
                    overlap.start,
                    overlap.end - overlap.start,
                    rtn.descriptor,
                );
            }
        }

        // Eagerly map side metadata for the whole contiguous range; failure
        // here is unrecoverable.
        rtn.metadata
            .try_map_metadata_address_range(rtn.start, rtn.extent, rtn.name)
            .unwrap_or_else(|e| {
                panic!("failed to mmap meta memory: {e}");
            });

        debug!(
            "Created space {} [{}, {}) for {} bytes",
            rtn.name,
            start,
            start + extent,
            extent
        );

        rtn
    }

    /// Eagerly initialize the SFT entries for a contiguous space's whole
    /// range. Discontiguous spaces are initialized as chunks are acquired.
    pub fn initialize_sft(
        &self,
        sft: &(dyn SFT + Sync + 'static),
        sft_map: &mut dyn crate::policy::sft_map::SFTMap,
    ) {
        if self.contiguous {
            unsafe { sft_map.eager_initialize(sft, self.start, self.extent) };
        }
    }

    /// The global VM map.
    pub fn vm_map(&self) -> &'static dyn VMMap {
        self.vm_map
    }

    /// The mmap protection for this space's pages: executable when the
    /// space requests it or the feature flag forces it on all spaces.
    pub fn mmap_protection(&self) -> MmapProtection {
        if self.permission_exec || cfg!(feature = "exec_permission_on_all_spaces") {
            MmapProtection::ReadWriteExec
        } else {
            MmapProtection::ReadWrite
        }
    }

    /// Print global (space-independent) debug information about an object:
    /// its VO bit (if enabled), log bit (if the plan uses it), and liveness.
    pub(crate) fn debug_print_object_global_info(&self, object: ObjectReference) {
        #[cfg(feature = "vo_bit")]
        println!(
            "vo bit = {}",
            crate::util::metadata::vo_bit::is_vo_bit_set(object)
        );
        if self.needs_log_bit {
            use crate::vm::object_model::ObjectModel;
            use std::sync::atomic::Ordering;
            println!(
                "log bit = {}",
                VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.is_unlogged::<VM>(object, Ordering::Relaxed),
            );
        }
        println!("is live = {}", object.is_live());
    }
}
746
747fn get_frac_available(frac: f32) -> usize {
748 trace!("AVAILABLE_START={}", vm_layout().available_start());
749 trace!("AVAILABLE_END={}", vm_layout().available_end());
750 let bytes = (frac * vm_layout().available_bytes() as f32) as usize;
751 trace!("bytes={}*{}={}", frac, vm_layout().available_bytes(), bytes);
752 let mb = bytes >> LOG_BYTES_IN_MBYTE;
753 let rtn = mb << LOG_BYTES_IN_MBYTE;
754 trace!("rtn={}", rtn);
755 let aligned_rtn = raw_align_up(rtn, BYTES_IN_CHUNK);
756 trace!("aligned_rtn={}", aligned_rtn);
757 aligned_rtn
758}
759
760pub fn required_chunks(pages: usize) -> usize {
761 let extent = raw_align_up(pages_to_bytes(pages), BYTES_IN_CHUNK);
762 extent >> LOG_BYTES_IN_CHUNK
763}