use crate::mmtk::SFT_MAP;
use crate::plan::{ObjectQueue, VectorObjectQueue};
use crate::policy::sft::GCWorkerMutRef;
use crate::policy::sft::SFT;
use crate::policy::space::{CommonSpace, Space};
use crate::util::address::Address;
use crate::util::alloc::allocator::AllocationOptions;
use crate::util::constants::BYTES_IN_PAGE;
use crate::util::heap::externalpageresource::{ExternalPageResource, ExternalPages};
use crate::util::heap::layout::vm_layout::BYTES_IN_CHUNK;
use crate::util::heap::PageResource;
use crate::util::metadata::mark_bit::MarkState;
#[cfg(feature = "set_unlog_bits_vm_space")]
use crate::util::metadata::MetadataSpec;
use crate::util::object_enum::ObjectEnumerator;
use crate::util::opaque_pointer::*;
use crate::util::ObjectReference;
use crate::vm::{ObjectModel, VMBinding};

use std::sync::atomic::Ordering;

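/// A space for memory managed by the VM/runtime rather than by MMTk. The backing
/// address ranges are supplied externally (via options or [`VMSpace::set_vm_region`])
/// and tracked by an [`ExternalPageResource`]; MMTk never allocates into or releases
/// this space. Objects in it are unmovable and are never reclaimed, but a mark bit is
/// maintained so the space can be traced for reachability.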
pub struct VMSpace<VM: VMBinding> {
    mark_state: MarkState,
    common: CommonSpace<VM>,
    pr: ExternalPageResource<VM>,
}

impl<VM: VMBinding> SFT for VMSpace<VM> {
    fn name(&self) -> &'static str {
        self.common.name
    }
    fn is_live(&self, _object: ObjectReference) -> bool {
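        // The VM space never reclaims objects: everything in it is considered live.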
        true
    }
    fn is_reachable(&self, object: ObjectReference) -> bool {
        self.mark_state.is_marked::<VM>(object)
    }
    #[cfg(feature = "object_pinning")]
    fn pin_object(&self, _object: ObjectReference) -> bool {
        false
    }
    #[cfg(feature = "object_pinning")]
    fn unpin_object(&self, _object: ObjectReference) -> bool {
        false
    }
    #[cfg(feature = "object_pinning")]
    fn is_object_pinned(&self, _object: ObjectReference) -> bool {
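        // Objects in this space are never moved, so they can be reported as pinned.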
        true
    }
    fn is_movable(&self) -> bool {
        false
    }
    #[cfg(feature = "sanity")]
    fn is_sane(&self) -> bool {
        true
    }
    fn initialize_object_metadata(&self, object: ObjectReference) {
        self.mark_state
            .on_object_metadata_initialization::<VM>(object);
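        // If the plan requires newly registered objects to start unlogged, set the
        // object's unlog bit.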
        if self.common.unlog_allocated_object {
            VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.mark_as_unlogged::<VM>(object, Ordering::SeqCst);
        }
        #[cfg(feature = "vo_bit")]
        crate::util::metadata::vo_bit::set_vo_bit(object);
    }
    #[cfg(feature = "is_mmtk_object")]
    fn is_mmtk_object(&self, addr: Address) -> Option<ObjectReference> {
        crate::util::metadata::vo_bit::is_vo_bit_set_for_addr(addr)
    }
    #[cfg(feature = "is_mmtk_object")]
    fn find_object_from_internal_pointer(
        &self,
        ptr: Address,
        max_search_bytes: usize,
    ) -> Option<ObjectReference> {
        crate::util::metadata::vo_bit::find_object_from_internal_pointer::<VM>(
            ptr,
            max_search_bytes,
        )
    }
    fn sft_trace_object(
        &self,
        queue: &mut VectorObjectQueue,
        object: ObjectReference,
        _worker: GCWorkerMutRef,
    ) -> ObjectReference {
        self.trace_object(queue, object)
    }
}

impl<VM: VMBinding> Space<VM> for VMSpace<VM> {
    fn as_space(&self) -> &dyn Space<VM> {
        self
    }
    fn as_sft(&self) -> &(dyn SFT + Sync + 'static) {
        self
    }
    fn get_page_resource(&self) -> &dyn PageResource<VM> {
        &self.pr
    }
    fn maybe_get_page_resource_mut(&mut self) -> Option<&mut dyn PageResource<VM>> {
        Some(&mut self.pr)
    }
    fn common(&self) -> &CommonSpace<VM> {
        &self.common
    }

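    // Install SFT entries for the region that was provided through the options (if
    // any). Regions registered later via `set_vm_region` update the SFT themselves.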
    fn initialize_sft(&self, sft_map: &mut dyn crate::policy::sft_map::SFTMap) {
        let vm_regions = self.pr.get_external_pages();
        assert!(vm_regions.len() <= 1);
        for external_pages in vm_regions.iter() {
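            // SFT entries are maintained at chunk granularity, so align the region to
            // chunk boundaries before installing them.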
            let start = external_pages.start.align_down(BYTES_IN_CHUNK);
            let size = external_pages.end.align_up(BYTES_IN_CHUNK) - start;
            debug_assert_eq!(
                sft_map.get_checked(start).name(),
                crate::policy::sft::EMPTY_SFT_NAME
            );
            assert!(sft_map.has_sft_entry(start), "The VM space start (aligned to {}) does not have a valid SFT entry. Possibly the address range is not in the address range we use.", start);
            unsafe {
                sft_map.eager_initialize(self.as_sft(), start, size);
            }
        }
    }

    fn release_multiple_pages(&mut self, _start: Address) {
        unreachable!()
    }

    fn acquire(&self, _tls: VMThread, _pages: usize, _alloc_options: AllocationOptions) -> Address {
        unreachable!()
    }

    fn address_in_space(&self, start: Address) -> bool {
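        // The VM region is defined by the runtime and may fall outside the address
        // ranges MMTk normally manages, so consult the SFT rather than the space's
        // own address range.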
        SFT_MAP.get_checked(start).name() == self.name()
    }

    fn enumerate_objects(&self, enumerator: &mut dyn ObjectEnumerator) {
        let external_pages = self.pr.get_external_pages();
        for ep in external_pages.iter() {
            enumerator.visit_address_range(ep.start, ep.end);
        }
    }

    fn clear_side_log_bits(&self) {
        let log_bit = VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.extract_side_spec();
        let external_pages = self.pr.get_external_pages();
        for ep in external_pages.iter() {
            log_bit.bzero_metadata(ep.start, ep.end - ep.start);
        }
    }

    fn set_side_log_bits(&self) {
        let log_bit = VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.extract_side_spec();
        let external_pages = self.pr.get_external_pages();
        for ep in external_pages.iter() {
            log_bit.bset_metadata(ep.start, ep.end - ep.start);
        }
    }
}

use crate::scheduler::GCWorker;
use crate::util::copy::CopySemantics;

impl<VM: VMBinding> crate::policy::gc_work::PolicyTraceObject<VM> for VMSpace<VM> {
    fn trace_object<Q: ObjectQueue, const KIND: crate::policy::gc_work::TraceKind>(
        &self,
        queue: &mut Q,
        object: ObjectReference,
        _copy: Option<CopySemantics>,
        _worker: &mut GCWorker<VM>,
    ) -> ObjectReference {
        self.trace_object(queue, object)
    }
    fn may_move_objects<const KIND: crate::policy::gc_work::TraceKind>() -> bool {
        false
    }
}

impl<VM: VMBinding> VMSpace<VM> {
    pub fn new(args: crate::policy::space::PlanCreateSpaceArgs<VM>) -> Self {
        let (vm_space_start, vm_space_size) =
            (*args.options.vm_space_start, *args.options.vm_space_size);
        let space = Self {
            mark_state: MarkState::new(),
            pr: ExternalPageResource::new(args.vm_map),
            common: CommonSpace::new(args.into_policy_args(
                false,
                true,
                crate::util::metadata::extract_side_metadata(&[
                    *VM::VMObjectModel::LOCAL_MARK_BIT_SPEC,
                ]),
            )),
        };

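        // If a VM region was supplied through the options, record it now. The SFT is
        // not set here; `initialize_sft` will install it later.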
        if !vm_space_start.is_zero() {
            space.set_vm_region_inner(vm_space_start, vm_space_size, false);
        }

        space
    }

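    /// Register a VM region with the space after creation. The SFT entries for the
    /// region are installed immediately.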
    pub fn set_vm_region(&mut self, start: Address, size: usize) {
        self.set_vm_region_inner(start, size, true);
    }

    fn set_vm_region_inner(&self, start: Address, size: usize, set_sft: bool) {
        assert!(size > 0);
        assert!(!start.is_zero());

        let end = start + size;

        let chunk_start = start.align_down(BYTES_IN_CHUNK);
        let chunk_end = end.align_up(BYTES_IN_CHUNK);
        let chunk_size = chunk_end - chunk_start;
        assert!(Address::range_intersection(
            &(chunk_start..chunk_end),
            &crate::util::heap::layout::available_range()
        )
        .is_empty());

        debug!(
            "Align VM space ({}, {}) to chunk ({}, {})",
            start, end, chunk_start, chunk_end
        );

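        // Tell the mmapper that these chunks are already mapped by the VM, then map
        // the side metadata for the chunk-aligned region.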
        self.common.mmapper.mark_as_mapped(chunk_start, chunk_size);
        self.common
            .metadata
            .try_map_metadata_space(chunk_start, chunk_size, self.get_name())
            .unwrap();
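        // Install the SFT entries now, unless the caller defers this to
        // `initialize_sft` (as `new` does).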
        if set_sft {
            assert!(SFT_MAP.has_sft_entry(chunk_start), "The VM space start (aligned to {}) does not have a valid SFT entry. Possibly the address range is not in the address range we use.", chunk_start);
            unsafe {
                SFT_MAP.update(self.as_sft(), chunk_start, chunk_size);
            }
        }

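        // Record the region, at page granularity, with the external page resource.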
        self.pr.add_new_external_pages(ExternalPages {
            start: start.align_down(BYTES_IN_PAGE),
            end: end.align_up(BYTES_IN_PAGE),
        });

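        // With the `set_unlog_bits_vm_space` feature, bulk-set the unlog bits over the
        // whole region (via the side log bit metadata) when the plan needs log bits.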
        #[cfg(feature = "set_unlog_bits_vm_space")]
        if self.common.needs_log_bit {
            if let MetadataSpec::OnSide(side) = *VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC {
                side.bset_metadata(start, size);
            }
        }
    }

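    /// Prepare the space for a GC: reset the global mark state and the per-region
    /// mark metadata.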
    pub fn prepare(&mut self) {
        self.mark_state.on_global_prepare::<VM>();
        for external_pages in self.pr.get_external_pages().iter() {
            self.mark_state.on_block_reset::<VM>(
                external_pages.start,
                external_pages.end - external_pages.start,
            );
        }
    }

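    /// Release the space after a GC. No memory is reclaimed; only the mark state is
    /// updated.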
    pub fn release(&mut self) {
        self.mark_state.on_global_release::<VM>();
    }

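    /// Trace an object in the VM space: mark it, and enqueue it the first time it is
    /// reached in this GC.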
    pub fn trace_object<Q: ObjectQueue>(
        &self,
        queue: &mut Q,
        object: ObjectReference,
    ) -> ObjectReference {
        #[cfg(feature = "vo_bit")]
        debug_assert!(
            crate::util::metadata::vo_bit::is_vo_bit_set(object),
            "{:x}: VO bit not set",
            object
        );
        debug_assert!(self.in_space(object));
        if self.mark_state.test_and_mark::<VM>(object) {
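            // On the first trace of this object, set its global log bit (marking it as
            // unlogged) if the plan requires traced objects to be unlogged.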
            #[cfg(feature = "set_unlog_bits_vm_space")]
            if self.common.unlog_traced_object {
                VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.store_atomic::<VM, u8>(
                    object,
                    1,
                    None,
                    Ordering::SeqCst,
                );
            }
            queue.enqueue(object);
        }
        object
    }
}