use crate::plan::{ObjectQueue, VectorObjectQueue};
use crate::policy::copy_context::PolicyCopyContext;
use crate::policy::gc_work::TRACE_KIND_TRANSITIVE_PIN;
use crate::policy::sft::GCWorkerMutRef;
use crate::policy::sft::SFT;
use crate::policy::space::{CommonSpace, Space};
use crate::scheduler::GCWorker;
use crate::util::alloc::allocator::AllocatorContext;
use crate::util::heap::{MonotonePageResource, PageResource};
use crate::util::metadata::{extract_side_metadata, MetadataSpec};
use crate::util::object_enum::ObjectEnumerator;
use crate::util::object_forwarding;
use crate::util::{copy::*, object_enum};
use crate::util::{Address, ObjectReference};
use crate::vm::*;
use libc::{mprotect, PROT_EXEC, PROT_NONE, PROT_READ, PROT_WRITE};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;

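/// A copying space: objects are bump-allocated via a [`MonotonePageResource`] and are
/// evacuated (forwarded to a copy elsewhere) whenever this space plays the from-space role.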
pub struct CopySpace<VM: VMBinding> {
    common: CommonSpace<VM>,
    pr: MonotonePageResource<VM>,
    from_space: AtomicBool,
}

impl<VM: VMBinding> SFT for CopySpace<VM> {
    fn name(&self) -> &'static str {
        self.get_name()
    }

    fn is_live(&self, object: ObjectReference) -> bool {
        !self.is_from_space() || object_forwarding::is_forwarded::<VM>(object)
    }

    #[cfg(feature = "object_pinning")]
    fn pin_object(&self, _object: ObjectReference) -> bool {
        panic!("Cannot pin/unpin objects of CopySpace.")
    }

    #[cfg(feature = "object_pinning")]
    fn unpin_object(&self, _object: ObjectReference) -> bool {
        panic!("Cannot pin/unpin objects of CopySpace.")
    }

    #[cfg(feature = "object_pinning")]
    fn is_object_pinned(&self, _object: ObjectReference) -> bool {
        false
    }

    fn is_movable(&self) -> bool {
        true
    }

    #[cfg(feature = "sanity")]
    fn is_sane(&self) -> bool {
        !self.is_from_space()
    }

    fn initialize_object_metadata(&self, _object: ObjectReference) {
        #[cfg(feature = "vo_bit")]
        crate::util::metadata::vo_bit::set_vo_bit(_object);
    }

    fn get_forwarded_object(&self, object: ObjectReference) -> Option<ObjectReference> {
        if !self.is_from_space() {
            return None;
        }

        if object_forwarding::is_forwarded::<VM>(object) {
            Some(object_forwarding::read_forwarding_pointer::<VM>(object))
        } else {
            None
        }
    }

    #[cfg(feature = "is_mmtk_object")]
    fn is_mmtk_object(&self, addr: Address) -> Option<ObjectReference> {
        crate::util::metadata::vo_bit::is_vo_bit_set_for_addr(addr)
    }

    #[cfg(feature = "is_mmtk_object")]
    fn find_object_from_internal_pointer(
        &self,
        ptr: Address,
        max_search_bytes: usize,
    ) -> Option<ObjectReference> {
        crate::util::metadata::vo_bit::find_object_from_internal_pointer::<VM>(
            ptr,
            max_search_bytes,
        )
    }

    fn sft_trace_object(
        &self,
        queue: &mut VectorObjectQueue,
        object: ObjectReference,
        worker: GCWorkerMutRef,
    ) -> ObjectReference {
        let worker = worker.into_mut::<VM>();
        self.trace_object(queue, object, self.common.copy, worker)
    }

    fn debug_print_object_info(&self, object: ObjectReference) {
        object_forwarding::debug_print_object_forwarding_info::<VM>(object);
        self.common.debug_print_object_global_info(object);
    }
}

impl<VM: VMBinding> Space<VM> for CopySpace<VM> {
    fn as_space(&self) -> &dyn Space<VM> {
        self
    }

    fn as_sft(&self) -> &(dyn SFT + Sync + 'static) {
        self
    }

    fn get_page_resource(&self) -> &dyn PageResource<VM> {
        &self.pr
    }

    fn maybe_get_page_resource_mut(&mut self) -> Option<&mut dyn PageResource<VM>> {
        Some(&mut self.pr)
    }

    fn common(&self) -> &CommonSpace<VM> {
        &self.common
    }

    fn initialize_sft(&self, sft_map: &mut dyn crate::policy::sft_map::SFTMap) {
        self.common().initialize_sft(self.as_sft(), sft_map)
    }

    fn release_multiple_pages(&mut self, _start: Address) {
        panic!("copyspace only releases pages en masse")
    }

    fn set_copy_for_sft_trace(&mut self, semantics: Option<CopySemantics>) {
        self.common.copy = semantics;
    }

    fn enumerate_objects(&self, enumerator: &mut dyn ObjectEnumerator) {
        object_enum::enumerate_blocks_from_monotonic_page_resource(enumerator, &self.pr);
    }

    fn clear_side_log_bits(&self) {
        let log_bit = VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.extract_side_spec();
        for (start, size) in self.pr.iterate_allocated_regions() {
            log_bit.bzero_metadata(start, size);
        }
    }

    fn set_side_log_bits(&self) {
        let log_bit = VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.extract_side_spec();
        for (start, size) in self.pr.iterate_allocated_regions() {
            log_bit.bset_metadata(start, size);
        }
    }
}

impl<VM: VMBinding> crate::policy::gc_work::PolicyTraceObject<VM> for CopySpace<VM> {
    fn trace_object<Q: ObjectQueue, const KIND: crate::policy::gc_work::TraceKind>(
        &self,
        queue: &mut Q,
        object: ObjectReference,
        copy: Option<CopySemantics>,
        worker: &mut GCWorker<VM>,
    ) -> ObjectReference {
        debug_assert!(
            KIND != TRACE_KIND_TRANSITIVE_PIN,
            "Copyspace does not support transitive pin trace."
        );
        self.trace_object(queue, object, copy, worker)
    }

    fn may_move_objects<const KIND: crate::policy::gc_work::TraceKind>() -> bool {
        true
    }
}

impl<VM: VMBinding> CopySpace<VM> {
    pub fn new(args: crate::policy::space::PlanCreateSpaceArgs<VM>, from_space: bool) -> Self {
        let vm_map = args.vm_map;
        let is_discontiguous = args.vmrequest.is_discontiguous();
        let common = CommonSpace::new(args.into_policy_args(
            true,
            false,
            extract_side_metadata(&[
                *VM::VMObjectModel::LOCAL_FORWARDING_BITS_SPEC,
                *VM::VMObjectModel::LOCAL_FORWARDING_POINTER_SPEC,
            ]),
        ));
        CopySpace {
            pr: if is_discontiguous {
                MonotonePageResource::new_discontiguous(vm_map)
            } else {
                MonotonePageResource::new_contiguous(common.start, common.extent, vm_map)
            },
            common,
            from_space: AtomicBool::new(from_space),
        }
    }

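    /// Set whether this space acts as the from-space for the upcoming collection.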
    pub fn prepare(&self, from_space: bool) {
        self.from_space.store(from_space, Ordering::SeqCst);
    }

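    /// Release the space at the end of a GC: clear any side forwarding bits and VO bits
    /// for all allocated regions, reset the page resource, and drop the from-space role.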
    pub fn release(&self) {
        for (start, size) in self.pr.iterate_allocated_regions() {
            if let MetadataSpec::OnSide(side_forwarding_status_table) =
                *<VM::VMObjectModel as ObjectModel<VM>>::LOCAL_FORWARDING_BITS_SPEC
            {
                side_forwarding_status_table.bzero_metadata(start, size);
            }

            #[cfg(feature = "vo_bit")]
            crate::util::metadata::vo_bit::bzero_vo_bit(start, size);
        }

        unsafe {
            self.pr.reset();
        }
        self.from_space.store(false, Ordering::SeqCst);
    }

    fn is_from_space(&self) -> bool {
        self.from_space.load(Ordering::SeqCst)
    }

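    /// Trace an object. If this space is currently the from-space, the object is forwarded
    /// using `semantics`; racing forwarders spin until the winner installs the forwarding
    /// pointer, and a newly created copy is enqueued for scanning.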
    pub fn trace_object<Q: ObjectQueue>(
        &self,
        queue: &mut Q,
        object: ObjectReference,
        semantics: Option<CopySemantics>,
        worker: &mut GCWorker<VM>,
    ) -> ObjectReference {
        trace!("copyspace.trace_object({:?}, {:?})", object, semantics);

        if !self.is_from_space() {
            return object;
        }

        debug_assert!(semantics.is_some());

        #[cfg(feature = "vo_bit")]
        debug_assert!(
            crate::util::metadata::vo_bit::is_vo_bit_set(object),
            "{:x}: VO bit not set",
            object
        );

        trace!("attempting to forward");
        let forwarding_status = object_forwarding::attempt_to_forward::<VM>(object);

        trace!("checking if object is being forwarded");
        if object_forwarding::state_is_forwarded_or_being_forwarded(forwarding_status) {
            trace!("... yes it is");
            let new_object =
                object_forwarding::spin_and_get_forwarded_object::<VM>(object, forwarding_status);
            trace!("Returning");
            new_object
        } else {
            trace!("... no it isn't. Copying");
            let new_object = object_forwarding::forward_object::<VM>(
                object,
                semantics.unwrap(),
                worker.get_copy_context_mut(),
                |_new_object| {
                    #[cfg(feature = "vo_bit")]
                    crate::util::metadata::vo_bit::set_vo_bit(_new_object);
                },
            );

            trace!("Forwarding pointer");
            queue.enqueue(new_object);
            trace!("Copied [{:?} -> {:?}]", object, new_object);
            new_object
        }
    }

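    // Debugging aids: protect() remaps the whole space PROT_NONE so that any stray access
    // to released from-space memory faults immediately; unprotect() restores read/write/exec
    // access. Both only work for contiguous spaces.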
    #[allow(dead_code)]
    pub fn protect(&self) {
        if !self.common().contiguous {
            panic!(
                "Implement Options.protectOnRelease for MonotonePageResource.release_pages_extent"
            )
        }
        let start = self.common().start;
        let extent = self.common().extent;
        unsafe {
            mprotect(start.to_mut_ptr(), extent, PROT_NONE);
        }
        trace!("Protect {:x} {:x}", start, start + extent);
    }

    #[allow(dead_code)]
    pub fn unprotect(&self) {
        if !self.common().contiguous {
            panic!(
                "Implement Options.protectOnRelease for MonotonePageResource.release_pages_extent"
            )
        }
        let start = self.common().start;
        let extent = self.common().extent;
        unsafe {
            mprotect(
                start.to_mut_ptr(),
                extent,
                PROT_READ | PROT_WRITE | PROT_EXEC,
            );
        }
        trace!("Unprotect {:x} {:x}", start, start + extent);
    }
}

use crate::util::alloc::Allocator;
use crate::util::alloc::BumpAllocator;
use crate::util::opaque_pointer::VMWorkerThread;

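/// Copy context for [`CopySpace`]: a thin wrapper around a [`BumpAllocator`] that allocates
/// to-space copies on behalf of a GC worker.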
pub struct CopySpaceCopyContext<VM: VMBinding> {
    copy_allocator: BumpAllocator<VM>,
}

impl<VM: VMBinding> PolicyCopyContext for CopySpaceCopyContext<VM> {
    type VM = VM;

    fn prepare(&mut self) {}

    fn release(&mut self) {}

    fn alloc_copy(
        &mut self,
        _original: ObjectReference,
        bytes: usize,
        align: usize,
        offset: usize,
    ) -> Address {
        self.copy_allocator.alloc(bytes, align, offset)
    }

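    // After copying, mark the new copy as unlogged (via the global log bit) when the target
    // space's `unlog_traced_object` flag is set.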
    fn post_copy(&mut self, obj: ObjectReference, _bytes: usize) {
        if self.copy_allocator.get_space().common().unlog_traced_object {
            VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC
                .mark_byte_as_unlogged::<VM>(obj, Ordering::Relaxed);
        }
    }
}

impl<VM: VMBinding> CopySpaceCopyContext<VM> {
    pub(crate) fn new(
        tls: VMWorkerThread,
        context: Arc<AllocatorContext<VM>>,
        tospace: &'static CopySpace<VM>,
    ) -> Self {
        CopySpaceCopyContext {
            copy_allocator: BumpAllocator::new(tls.0, tospace, context),
        }
    }

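    /// Rebind the underlying bump allocator to a different [`CopySpace`] (e.g. when the
    /// to-space flips). The unsafe block only extends the lifetime of the space reference;
    /// the caller must ensure the space outlives the allocator.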
    pub fn rebind(&mut self, space: &CopySpace<VM>) {
        self.copy_allocator
            .rebind(unsafe { &*{ space as *const _ } });
    }
}