// mmtk/memory_manager.rs
1//! VM-to-MMTk interface: safe Rust APIs.
2//!
3//! This module provides a safe Rust API for mmtk-core.
4//! We expect the VM binding to inherit and extend this API by:
5//! 1. adding their VM-specific functions
6//! 2. exposing the functions to native if necessary. And the VM binding needs to manage the unsafety
7//! for exposing this safe API to FFI.
8//!
9//! For example, for mutators, this API provides a `Box<Mutator>`, and requires a `&mut Mutator` for allocation.
10//! A VM binding can borrow a mutable reference directly from `Box<Mutator>`, and call `alloc()`. Alternatively,
11//! it can turn the `Box` pointer to a native pointer (`*mut Mutator`), and forge a mut reference from the native
12//! pointer. Either way, the VM binding code needs to guarantee the safety.
13
14use crate::mmtk::MMTKBuilder;
15use crate::mmtk::MMTK;
16use crate::plan::AllocationSemantics;
17use crate::plan::{Mutator, MutatorContext};
18use crate::scheduler::WorkBucketStage;
19use crate::scheduler::{GCWork, GCWorker};
20use crate::util::alloc::allocator::AllocationOptions;
21use crate::util::alloc::allocators::AllocatorSelector;
22use crate::util::constants::LOG_BYTES_IN_PAGE;
23use crate::util::heap::layout::vm_layout::vm_layout;
24use crate::util::opaque_pointer::*;
25use crate::util::{Address, ObjectReference};
26use crate::vm::slot::MemorySlice;
27use crate::vm::ReferenceGlue;
28use crate::vm::VMBinding;
29
30use std::collections::HashMap;
31
32/// Initialize an MMTk instance. A VM should call this method after creating an [`crate::MMTK`]
33/// instance but before using any of the methods provided in MMTk (except `process()` and `process_bulk()`).
34///
35/// We expect a binding to ininitialize MMTk in the following steps:
36///
37/// 1. Create an [`crate::MMTKBuilder`] instance.
38/// 2. Set command line options for MMTKBuilder by [`crate::memory_manager::process`] or [`crate::memory_manager::process_bulk`].
39/// 3. Initialize MMTk by calling this function, `mmtk_init()`, and pass the builder earlier. This call will return an MMTK instance.
40/// Usually a binding store the MMTK instance statically as a singleton. We plan to allow multiple instances, but this is not yet fully
41/// supported. Currently we assume a binding will only need one MMTk instance. Note that GC is enabled by default and the binding should
42/// implement `VMCollection::is_collection_enabled()` if it requires that the GC should be disabled at a particular time.
43///
44/// This method will attempt to initialize the built-in `env_logger` if the Cargo feature "builtin_env_logger" is enabled (by default).
45/// If the VM would like to use its own logger, it should disable the default feature "builtin_env_logger" in `Cargo.toml`.
46///
47/// Note that, to allow MMTk to do GC properly, `initialize_collection()` needs to be called after this call when
48/// the VM's thread system is ready to spawn GC workers.
49///
50/// Note that this method returns a boxed pointer of MMTK, which means MMTk has a bound lifetime with the box pointer. However, some of our current APIs assume
51/// that MMTk has a static lifetime, which presents a mismatch with this API. We plan to address the lifetime issue in the future. At this point, we recommend a binding
52/// to 'expand' the lifetime for the boxed pointer to static. There could be multiple ways to achieve it: 1. `Box::leak()` will turn the box pointer to raw pointer
53/// which has static lifetime, 2. create MMTK as a lazily initialized static variable
54/// (see [what we do for our dummy binding](https://github.com/mmtk/mmtk-core/blob/master/vmbindings/dummyvm/src/lib.rs#L42))
55///
56/// Arguments:
57/// * `builder`: The reference to a MMTk builder.
58pub fn mmtk_init<VM: VMBinding>(builder: &MMTKBuilder) -> Box<MMTK<VM>> {
59 crate::util::logger::try_init();
60 #[cfg(all(feature = "perf_counter", target_os = "linux"))]
61 {
62 use std::fs::File;
63 use std::io::Read;
64 let mut status = File::open("/proc/self/status").unwrap();
65 let mut contents = String::new();
66 status.read_to_string(&mut contents).unwrap();
67 for line in contents.lines() {
68 let split: Vec<&str> = line.split('\t').collect();
69 if split[0] == "Threads:" {
70 let threads = split[1].parse::<i32>().unwrap();
71 if threads != 1 {
72 warn!("Current process has {} threads, process-wide perf event measurement will only include child threads spawned from this thread", threads);
73 }
74 }
75 }
76 }
77 let mmtk = builder.build();
78
79 info!(
80 "Initialized MMTk with {:?} ({:?})",
81 *mmtk.options.plan, *mmtk.options.gc_trigger
82 );
83 #[cfg(feature = "extreme_assertions")]
84 warn!("The feature 'extreme_assertions' is enabled. MMTk will run expensive run-time checks. Slow performance should be expected.");
85 Box::new(mmtk)
86}
87
/// Add an externally mmapped region to the VM space. Besides this function, a VM space can also
/// be configured through the MMTk options `vm_space_start` and `vm_space_size`. The VM space may
/// be discontiguous: this function may be called multiple times, and every address range passed
/// in is treated as part of the VM space. Removing a region from the VM space is currently
/// not supported.
#[cfg(feature = "vm_space")]
pub fn set_vm_space<VM: VMBinding>(mmtk: &'static mut MMTK<VM>, start: Address, size: usize) {
    // NOTE(review): `get_plan_mut` is unsafe; we hold an exclusive `&mut MMTK` here,
    // which presumably satisfies its invariant — confirm against its safety contract.
    let plan = unsafe { mmtk.get_plan_mut() };
    plan.base_mut().vm_space.set_vm_region(start, size);
}
99
100/// Request MMTk to create a mutator for the given thread. The ownership
101/// of returned boxed mutator is transferred to the binding, and the binding needs to take care of its
102/// lifetime. For performance reasons, A VM should store the returned mutator in a thread local storage
103/// that can be accessed efficiently. A VM may also copy and embed the mutator stucture to a thread-local data
104/// structure, and use that as a reference to the mutator (it is okay to drop the box once the content is copied --
105/// Note that `Mutator` may contain pointers so a binding may drop the box only if they perform a deep copy).
106///
107/// MMTk generally does not expect the runtime to create or destroy mutators during a pause. See also [`crate::vm::ActivePlan::mutators`].
108///
109/// Arguments:
110/// * `mmtk`: A reference to an MMTk instance.
111/// * `tls`: The thread that will be associated with the mutator.
112pub fn bind_mutator<VM: VMBinding>(
113 mmtk: &'static MMTK<VM>,
114 tls: VMMutatorThread,
115) -> Box<Mutator<VM>> {
116 let mutator = crate::plan::create_mutator(tls, mmtk);
117
118 const LOG_ALLOCATOR_MAPPING: bool = false;
119 if LOG_ALLOCATOR_MAPPING {
120 info!("{:?}", mutator.config);
121 }
122 mutator
123}
124
125/// Report to MMTk that a mutator is no longer needed. All mutator state is flushed before it is
126/// destroyed. A binding should not attempt to use the mutator after this call. MMTk will not
127/// attempt to reclaim the memory for the mutator, so a binding should properly reclaim the memory
128/// for the mutator after this call.
129///
130/// MMTk generally does not expect the runtime to create or destroy mutators during a pause. See also [`crate::vm::ActivePlan::mutators`].
131///
132/// Arguments:
133/// * `mutator`: A reference to the mutator to be destroyed.
134pub fn destroy_mutator<VM: VMBinding>(mutator: &mut Mutator<VM>) {
135 mutator.flush();
136 mutator.on_destroy();
137}
138
139/// Flush the mutator's local states.
140///
141/// Arguments:
142/// * `mutator`: A reference to the mutator.
143pub fn flush_mutator<VM: VMBinding>(mutator: &mut Mutator<VM>) {
144 mutator.flush()
145}
146
147/// Allocate memory for an object.
148///
149/// When the allocation is successful, it returns the starting address of the new object. The
150/// memory range for the new object is `size` bytes starting from the returned address, and
151/// `RETURNED_ADDRESS + offset` is guaranteed to be aligned to the `align` parameter. The returned
152/// address of a successful allocation will never be zero.
153///
154/// If MMTk fails to allocate memory, it will attempt a GC to free up some memory and retry the
155/// allocation. After triggering GC, it will call [`crate::vm::Collection::block_for_gc`] to suspend
156/// the current thread that is allocating. Callers of `alloc` must be aware of this behavior.
157/// For example, JIT compilers that support
158/// precise stack scanning need to make the call site of `alloc` a GC-safe point by generating stack maps. See
159/// [`alloc_with_options`] if it is undesirable to trigger GC at this allocation site.
160///
161/// If MMTk has attempted at least one GC, and still cannot free up enough memory, it will call
162/// [`crate::vm::Collection::out_of_memory`] to inform the binding. The VM binding
163/// can implement that method to handle the out-of-memory event in a VM-specific way, including but
164/// not limited to throwing exceptions or errors. If [`crate::vm::Collection::out_of_memory`] returns
165/// normally without panicking or throwing exceptions, this function will return zero.
166///
167/// For performance reasons, a VM should implement the allocation fast-path on their side rather
168/// than just calling this function.
169///
170/// Arguments:
171/// * `mutator`: The mutator to perform this allocation request.
172/// * `size`: The number of bytes required for the object.
173/// * `align`: Required alignment for the object.
174/// * `offset`: Offset associated with the alignment.
175/// * `semantics`: The allocation semantic required for the allocation.
176pub fn alloc<VM: VMBinding>(
177 mutator: &mut Mutator<VM>,
178 size: usize,
179 align: usize,
180 offset: usize,
181 semantics: AllocationSemantics,
182) -> Address {
183 #[cfg(debug_assertions)]
184 crate::util::alloc::allocator::assert_allocation_args::<VM>(size, align, offset);
185
186 mutator.alloc(size, align, offset, semantics)
187}
188
189/// Allocate memory for an object.
190///
191/// This allocation function allows alternation to the allocation behaviors, specified by the
192/// [`crate::util::alloc::AllocationOptions`]. For example, one can allow
193/// overcommit the memory to go beyond the heap size without triggering a GC. This function can be
194/// used in certain cases where the runtime needs a different allocation behavior other than
195/// what the default [`alloc`] provides.
196///
197/// Arguments:
198/// * `mutator`: The mutator to perform this allocation request.
199/// * `size`: The number of bytes required for the object.
200/// * `align`: Required alignment for the object.
201/// * `offset`: Offset associated with the alignment.
202/// * `semantics`: The allocation semantic required for the allocation.
203/// * `options`: the allocation options to change the default allocation behavior for this request.
204pub fn alloc_with_options<VM: VMBinding>(
205 mutator: &mut Mutator<VM>,
206 size: usize,
207 align: usize,
208 offset: usize,
209 semantics: AllocationSemantics,
210 options: crate::util::alloc::allocator::AllocationOptions,
211) -> Address {
212 #[cfg(debug_assertions)]
213 crate::util::alloc::allocator::assert_allocation_args::<VM>(size, align, offset);
214
215 mutator.alloc_with_options(size, align, offset, semantics, options)
216}
217
218/// Invoke the allocation slow path of [`alloc`].
219/// Like [`alloc`], this function may trigger GC and call [`crate::vm::Collection::block_for_gc`] or
220/// [`crate::vm::Collection::out_of_memory`]. The caller needs to be aware of that.
221///
222/// *Notes*: This is only intended for use when a binding implements the fastpath on
223/// the binding side. When the binding handles fast path allocation and the fast path fails, it can use this
224/// method for slow path allocation. Calling before exhausting fast path allocaiton buffer will lead to bad
225/// performance.
226///
227/// Arguments:
228/// * `mutator`: The mutator to perform this allocation request.
229/// * `size`: The number of bytes required for the object.
230/// * `align`: Required alignment for the object.
231/// * `offset`: Offset associated with the alignment.
232/// * `semantics`: The allocation semantic required for the allocation.
233pub fn alloc_slow<VM: VMBinding>(
234 mutator: &mut Mutator<VM>,
235 size: usize,
236 align: usize,
237 offset: usize,
238 semantics: AllocationSemantics,
239) -> Address {
240 mutator.alloc_slow(size, align, offset, semantics)
241}
242
243/// Invoke the allocation slow path of [`alloc_with_options`].
244///
245/// Like [`alloc_with_options`], This allocation function allows alternation to the allocation behaviors, specified by the
246/// [`crate::util::alloc::AllocationOptions`]. For example, one can allow
247/// overcommit the memory to go beyond the heap size without triggering a GC. This function can be
248/// used in certain cases where the runtime needs a different allocation behavior other than
249/// what the default [`alloc`] provides.
250///
251/// Like [`alloc_slow`], this function is also only intended for use when a binding implements the
252/// fastpath on the binding side.
253///
254/// Arguments:
255/// * `mutator`: The mutator to perform this allocation request.
256/// * `size`: The number of bytes required for the object.
257/// * `align`: Required alignment for the object.
258/// * `offset`: Offset associated with the alignment.
259/// * `semantics`: The allocation semantic required for the allocation.
260pub fn alloc_slow_with_options<VM: VMBinding>(
261 mutator: &mut Mutator<VM>,
262 size: usize,
263 align: usize,
264 offset: usize,
265 semantics: AllocationSemantics,
266 options: AllocationOptions,
267) -> Address {
268 mutator.alloc_slow_with_options(size, align, offset, semantics, options)
269}
270
271/// Perform post-allocation actions, usually initializing object metadata. For many allocators none are
272/// required. For performance reasons, a VM should implement the post alloc fast-path on their side
273/// rather than just calling this function.
274///
275/// Arguments:
276/// * `mutator`: The mutator to perform post-alloc actions.
277/// * `refer`: The newly allocated object.
278/// * `bytes`: The size of the space allocated for the object (in bytes).
279/// * `semantics`: The allocation semantics used for the allocation.
280pub fn post_alloc<VM: VMBinding>(
281 mutator: &mut Mutator<VM>,
282 refer: ObjectReference,
283 bytes: usize,
284 semantics: AllocationSemantics,
285) {
286 mutator.post_alloc(refer, bytes, semantics);
287}
288
289/// The *subsuming* write barrier by MMTk. For performance reasons, a VM should implement the write barrier
290/// fast-path on their side rather than just calling this function.
291///
292/// For a correct barrier implementation, a VM binding needs to choose one of the following options:
293/// * Use subsuming barrier `object_reference_write`
294/// * Use both `object_reference_write_pre` and `object_reference_write_post`, or both, if the binding has difficulty delegating the store to mmtk-core with the subsuming barrier.
295/// * Implement fast-path on the VM side, and call the generic api `object_reference_write_slow` as barrier slow-path call.
296/// * Implement fast-path on the VM side, and do a specialized slow-path call.
297///
298/// Arguments:
299/// * `mutator`: The mutator for the current thread.
300/// * `src`: The modified source object.
301/// * `slot`: The location of the field to be modified.
302/// * `target`: The target for the write operation.
303///
304/// # Deprecated
305///
306/// This function needs to be redesigned. Its current form has multiple issues.
307///
308/// - It is only able to write non-null object references into the slot. But dynamic language
309/// VMs may write non-reference values, such as tagged small integers, special values such as
310/// `null`, `undefined`, `true`, `false`, etc. into a field that previous contains an object
311/// reference.
312/// - It relies on `slot.store` to write `target` into the slot, but `slot.store` is designed for
313/// forwarding references when an object is moved by GC, and is supposed to preserve tagged
314/// type information, the offset (if it is an interior pointer), etc. A write barrier is
315/// associated to an assignment operation, which usually updates such information instead.
316///
317/// We will redesign a more general subsuming write barrier to address those problems and replace
318/// the current `object_reference_write`. Before that happens, VM bindings should use
319/// `object_reference_write_pre` and `object_reference_write_post` instead.
320#[deprecated = "Use `object_reference_write_pre` and `object_reference_write_post` instead, until this function is redesigned"]
321pub fn object_reference_write<VM: VMBinding>(
322 mutator: &mut Mutator<VM>,
323 src: ObjectReference,
324 slot: VM::VMSlot,
325 target: ObjectReference,
326) {
327 mutator.barrier().object_reference_write(src, slot, target);
328}
329
330/// The write barrier by MMTk. This is a *pre* write barrier, which we expect a binding to call
331/// *before* it modifies an object. For performance reasons, a VM should implement the write barrier
332/// fast-path on their side rather than just calling this function.
333///
334/// For a correct barrier implementation, a VM binding needs to choose one of the following options:
335/// * Use subsuming barrier `object_reference_write`
336/// * Use both `object_reference_write_pre` and `object_reference_write_post`, or both, if the binding has difficulty delegating the store to mmtk-core with the subsuming barrier.
337/// * Implement fast-path on the VM side, and call the generic api `object_reference_write_slow` as barrier slow-path call.
338/// * Implement fast-path on the VM side, and do a specialized slow-path call.
339///
340/// Arguments:
341/// * `mutator`: The mutator for the current thread.
342/// * `src`: The modified source object.
343/// * `slot`: The location of the field to be modified.
344/// * `target`: The target for the write operation. `None` if the slot did not hold an object
345/// reference before the write operation. For example, the slot may be holding a `null`
346/// reference, a small integer, or special values such as `true`, `false`, `undefined`, etc.
347pub fn object_reference_write_pre<VM: VMBinding>(
348 mutator: &mut Mutator<VM>,
349 src: ObjectReference,
350 slot: VM::VMSlot,
351 target: Option<ObjectReference>,
352) {
353 mutator
354 .barrier()
355 .object_reference_write_pre(src, slot, target);
356}
357
358/// The write barrier by MMTk. This is a *post* write barrier, which we expect a binding to call
359/// *after* it modifies an object. For performance reasons, a VM should implement the write barrier
360/// fast-path on their side rather than just calling this function.
361///
362/// For a correct barrier implementation, a VM binding needs to choose one of the following options:
363/// * Use subsuming barrier `object_reference_write`
364/// * Use both `object_reference_write_pre` and `object_reference_write_post`, or both, if the binding has difficulty delegating the store to mmtk-core with the subsuming barrier.
365/// * Implement fast-path on the VM side, and call the generic api `object_reference_write_slow` as barrier slow-path call.
366/// * Implement fast-path on the VM side, and do a specialized slow-path call.
367///
368/// Arguments:
369/// * `mutator`: The mutator for the current thread.
370/// * `src`: The modified source object.
371/// * `slot`: The location of the field to be modified.
372/// * `target`: The target for the write operation. `None` if the slot no longer hold an object
373/// reference after the write operation. This may happen when writing a `null` reference, a small
374/// integers, or a special value such as`true`, `false`, `undefined`, etc., into the slot.
375pub fn object_reference_write_post<VM: VMBinding>(
376 mutator: &mut Mutator<VM>,
377 src: ObjectReference,
378 slot: VM::VMSlot,
379 target: Option<ObjectReference>,
380) {
381 mutator
382 .barrier()
383 .object_reference_write_post(src, slot, target);
384}
385
386/// The *subsuming* memory region copy barrier by MMTk.
387/// This is called when the VM tries to copy a piece of heap memory to another.
388/// The data within the slice does not necessarily to be all valid pointers,
389/// but the VM binding will be able to filter out non-reference values on slot iteration.
390///
391/// For VMs that performs a heap memory copy operation, for example OpenJDK's array copy operation, the binding needs to
392/// call `memory_region_copy*` APIs. Same as `object_reference_write*`, the binding can choose either the subsuming barrier,
393/// or the pre/post barrier.
394///
395/// Arguments:
396/// * `mutator`: The mutator for the current thread.
397/// * `src`: Source memory slice to copy from.
398/// * `dst`: Destination memory slice to copy to.
399///
400/// The size of `src` and `dst` shoule be equal
401pub fn memory_region_copy<VM: VMBinding>(
402 mutator: &'static mut Mutator<VM>,
403 src: VM::VMMemorySlice,
404 dst: VM::VMMemorySlice,
405) {
406 debug_assert_eq!(src.bytes(), dst.bytes());
407 mutator.barrier().memory_region_copy(src, dst);
408}
409
410/// The *generic* memory region copy *pre* barrier by MMTk, which we expect a binding to call
411/// *before* it performs memory copy.
412/// This is called when the VM tries to copy a piece of heap memory to another.
413/// The data within the slice does not necessarily to be all valid pointers,
414/// but the VM binding will be able to filter out non-reference values on slot iteration.
415///
416/// For VMs that performs a heap memory copy operation, for example OpenJDK's array copy operation, the binding needs to
417/// call `memory_region_copy*` APIs. Same as `object_reference_write*`, the binding can choose either the subsuming barrier,
418/// or the pre/post barrier.
419///
420/// Arguments:
421/// * `mutator`: The mutator for the current thread.
422/// * `src`: Source memory slice to copy from.
423/// * `dst`: Destination memory slice to copy to.
424///
425/// The size of `src` and `dst` shoule be equal
426pub fn memory_region_copy_pre<VM: VMBinding>(
427 mutator: &'static mut Mutator<VM>,
428 src: VM::VMMemorySlice,
429 dst: VM::VMMemorySlice,
430) {
431 debug_assert_eq!(src.bytes(), dst.bytes());
432 mutator.barrier().memory_region_copy_pre(src, dst);
433}
434
435/// The *generic* memory region copy *post* barrier by MMTk, which we expect a binding to call
436/// *after* it performs memory copy.
437/// This is called when the VM tries to copy a piece of heap memory to another.
438/// The data within the slice does not necessarily to be all valid pointers,
439/// but the VM binding will be able to filter out non-reference values on slot iteration.
440///
441/// For VMs that performs a heap memory copy operation, for example OpenJDK's array copy operation, the binding needs to
442/// call `memory_region_copy*` APIs. Same as `object_reference_write*`, the binding can choose either the subsuming barrier,
443/// or the pre/post barrier.
444///
445/// Arguments:
446/// * `mutator`: The mutator for the current thread.
447/// * `src`: Source memory slice to copy from.
448/// * `dst`: Destination memory slice to copy to.
449///
450/// The size of `src` and `dst` shoule be equal
451pub fn memory_region_copy_post<VM: VMBinding>(
452 mutator: &'static mut Mutator<VM>,
453 src: VM::VMMemorySlice,
454 dst: VM::VMMemorySlice,
455) {
456 debug_assert_eq!(src.bytes(), dst.bytes());
457 mutator.barrier().memory_region_copy_post(src, dst);
458}
459
460/// Return an AllocatorSelector for the given allocation semantic. This method is provided
461/// so that VM compilers may call it to help generate allocation fast-path.
462///
463/// Arguments:
464/// * `mmtk`: The reference to an MMTk instance.
465/// * `semantics`: The allocation semantic to query.
466pub fn get_allocator_mapping<VM: VMBinding>(
467 mmtk: &MMTK<VM>,
468 semantics: AllocationSemantics,
469) -> AllocatorSelector {
470 mmtk.get_plan().get_allocator_mapping()[semantics]
471}
472
473/// The standard malloc. MMTk either uses its own allocator, or forward the call to a
474/// library malloc.
475pub fn malloc(size: usize) -> Address {
476 crate::util::malloc::malloc(size)
477}
478
/// The standard malloc, except that with the feature `malloc_counted_size` MMTk counts the
/// allocated memory into its heap size — hence the `MMTK` reference argument. MMTk either
/// uses its own allocator, or forwards the call to a library malloc.
#[cfg(feature = "malloc_counted_size")]
pub fn counted_malloc<VM: VMBinding>(mmtk: &MMTK<VM>, size: usize) -> Address {
    crate::util::malloc::counted_malloc(mmtk, size)
}
486
487/// The standard calloc.
488pub fn calloc(num: usize, size: usize) -> Address {
489 crate::util::malloc::calloc(num, size)
490}
491
/// The standard calloc, except that with the feature `malloc_counted_size` MMTk counts the
/// allocated memory into its heap size — hence the `MMTK` reference argument.
#[cfg(feature = "malloc_counted_size")]
pub fn counted_calloc<VM: VMBinding>(mmtk: &MMTK<VM>, num: usize, size: usize) -> Address {
    crate::util::malloc::counted_calloc(mmtk, num, size)
}
498
499/// The standard realloc.
500pub fn realloc(addr: Address, size: usize) -> Address {
501 crate::util::malloc::realloc(addr, size)
502}
503
/// The standard realloc, except that with the feature `malloc_counted_size` MMTk counts the
/// allocated memory into its heap size — hence the `MMTK` reference argument and the size of
/// the existing memory being reallocated.
/// The `addr` in the arguments must be an address that is earlier returned from MMTk's `malloc()`, `calloc()` or `realloc()`.
#[cfg(feature = "malloc_counted_size")]
pub fn realloc_with_old_size<VM: VMBinding>(
    mmtk: &MMTK<VM>,
    addr: Address,
    size: usize,
    old_size: usize,
) -> Address {
    crate::util::malloc::realloc_with_old_size(mmtk, addr, size, old_size)
}
516
517/// The standard free.
518/// The `addr` in the arguments must be an address that is earlier returned from MMTk's `malloc()`, `calloc()` or `realloc()`.
519pub fn free(addr: Address) {
520 crate::util::malloc::free(addr)
521}
522
/// The standard free, except that with the feature `malloc_counted_size` MMTk counts the
/// allocated memory into its heap size — hence the `MMTK` reference argument and the size of
/// the memory to free.
/// The `addr` in the arguments must be an address that is earlier returned from MMTk's `malloc()`, `calloc()` or `realloc()`.
#[cfg(feature = "malloc_counted_size")]
pub fn free_with_size<VM: VMBinding>(mmtk: &MMTK<VM>, addr: Address, old_size: usize) {
    crate::util::malloc::free_with_size(mmtk, addr, old_size)
}
530
/// Get the current active malloc'd bytes. MMTk only accounts here for bytes allocated through
/// the 'counted malloc' functions.
#[cfg(feature = "malloc_counted_size")]
pub fn get_malloc_bytes<VM: VMBinding>(mmtk: &MMTK<VM>) -> usize {
    use std::sync::atomic::Ordering;
    mmtk.state.malloc_bytes.load(Ordering::SeqCst)
}
537
538/// Poll for GC. MMTk will decide if a GC is needed. If so, this call will block
539/// the current thread, and trigger a GC. Otherwise, it will simply return.
540/// Usually a binding does not need to call this function. MMTk will poll for GC during its allocation.
541/// However, if a binding uses counted malloc (which won't poll for GC), they may want to poll for GC manually.
542/// This function should only be used by mutator threads.
543pub fn gc_poll<VM: VMBinding>(mmtk: &MMTK<VM>, tls: VMMutatorThread) {
544 use crate::vm::{ActivePlan, Collection};
545 debug_assert!(
546 VM::VMActivePlan::is_mutator(tls.0),
547 "gc_poll() can only be called by a mutator thread."
548 );
549
550 if mmtk.gc_trigger.poll(false, None) {
551 debug!("Collection required");
552 if !mmtk.state.is_initialized() {
553 panic!("GC is not allowed here: collection is not initialized (did you call initialize_collection()?).");
554 }
555 VM::VMCollection::block_for_gc(tls);
556 }
557}
558
559/// Wrapper for [`crate::scheduler::GCWorker::run`].
560pub fn start_worker<VM: VMBinding>(
561 mmtk: &'static MMTK<VM>,
562 tls: VMWorkerThread,
563 worker: Box<GCWorker<VM>>,
564) {
565 worker.run(tls, mmtk);
566}
567
568/// Wrapper for [`crate::mmtk::MMTK::initialize_collection`].
569pub fn initialize_collection<VM: VMBinding>(mmtk: &'static MMTK<VM>, tls: VMThread) {
570 mmtk.initialize_collection(tls);
571}
572
/// Process an MMTk run-time option. Returns true if the option is processed successfully.
///
/// Note that this operates on an [`MMTKBuilder`] (i.e. before `mmtk_init()` is called), not on
/// an MMTK instance.
///
/// Arguments:
/// * `builder`: A reference to the MMTk builder that collects the options.
/// * `name`: The name of the option.
/// * `value`: The value of the option (as a string).
pub fn process(builder: &mut MMTKBuilder, name: &str, value: &str) -> bool {
    builder.set_option(name, value)
}
582
/// Process multiple MMTk run-time options. Returns true if all the options are processed successfully.
///
/// Note that this operates on an [`MMTKBuilder`] (i.e. before `mmtk_init()` is called), not on
/// an MMTK instance.
///
/// Arguments:
/// * `builder`: A reference to the MMTk builder that collects the options.
/// * `options`: a string that is key value pairs separated by white spaces, e.g. "threads=1 stress_factor=4096"
pub fn process_bulk(builder: &mut MMTKBuilder, options: &str) -> bool {
    builder.set_options_bulk_by_str(options)
}
591
592/// Return used memory in bytes. MMTk accounts for memory in pages, thus this method always returns a value in
593/// page granularity.
594///
595/// Arguments:
596/// * `mmtk`: A reference to an MMTk instance.
597pub fn used_bytes<VM: VMBinding>(mmtk: &MMTK<VM>) -> usize {
598 mmtk.get_plan().get_used_pages() << LOG_BYTES_IN_PAGE
599}
600
601/// Return free memory in bytes. MMTk accounts for memory in pages, thus this method always returns a value in
602/// page granularity.
603///
604/// Arguments:
605/// * `mmtk`: A reference to an MMTk instance.
606pub fn free_bytes<VM: VMBinding>(mmtk: &MMTK<VM>) -> usize {
607 mmtk.get_plan().get_free_pages() << LOG_BYTES_IN_PAGE
608}
609
610/// Return a hash map for live bytes statistics in the last GC for each space.
611///
612/// MMTk usually accounts for memory in pages by each space.
613/// This is a special method that we count the size of every live object in a GC, and sum up the total bytes.
614/// We provide this method so users can use [`crate::LiveBytesStats`] to know if
615/// the space is fragmented.
616/// The value returned by this method is only updated when we finish tracing in a GC. A recommended timing
617/// to call this method is at the end of a GC (e.g. when the runtime is about to resume threads).
618pub fn live_bytes_in_last_gc<VM: VMBinding>(
619 mmtk: &MMTK<VM>,
620) -> HashMap<&'static str, crate::LiveBytesStats> {
621 mmtk.state.live_bytes_in_last_gc.borrow().clone()
622}
623
624/// Return the starting address of the heap. *Note that currently MMTk uses
625/// a fixed address range as heap.*
626pub fn starting_heap_address() -> Address {
627 vm_layout().heap_start
628}
629
630/// Return the ending address of the heap. *Note that currently MMTk uses
631/// a fixed address range as heap.*
632pub fn last_heap_address() -> Address {
633 vm_layout().heap_end
634}
635
636/// Return the total memory in bytes.
637///
638/// Arguments:
639/// * `mmtk`: A reference to an MMTk instance.
640pub fn total_bytes<VM: VMBinding>(mmtk: &MMTK<VM>) -> usize {
641 mmtk.get_plan().get_total_pages() << LOG_BYTES_IN_PAGE
642}
643
644/// The application code has requested a collection. This is just a GC hint, and
645/// we may ignore it.
646///
647/// Returns whether a GC was ran or not. If MMTk triggers a GC, this method will block the
648/// calling thread and return true when the GC finishes. Otherwise, this method returns
649/// false immediately.
650///
651/// Arguments:
652/// * `mmtk`: A reference to an MMTk instance.
653/// * `tls`: The thread that triggers this collection request.
654pub fn handle_user_collection_request<VM: VMBinding>(
655 mmtk: &MMTK<VM>,
656 tls: VMMutatorThread,
657) -> bool {
658 mmtk.handle_user_collection_request(tls, false, false)
659}
660
661/// Is the object alive?
662///
663/// Arguments:
664/// * `object`: The object reference to query.
665pub fn is_live_object(object: ObjectReference) -> bool {
666 object.is_live()
667}
668
/// Check if `addr` is the raw address of an object reference to an MMTk object.
///
/// Concretely:
/// 1. Return `Some(object)` if `ObjectReference::from_raw_address(addr)` is a valid object
///    reference to an object in any space in MMTk. `object` is the result of
///    `ObjectReference::from_raw_address(addr)`.
/// 2. Return `None` otherwise.
///
/// This function is useful for conservative root scanning. The VM can iterate through all words in
/// a stack, filter out zeros, misaligned words, obviously out-of-range words (such as addresses
/// greater than `0x0000_7fff_ffff_ffff` on Linux on x86_64), and use this function to decide if the
/// word is really a reference.
///
/// This function does not handle internal pointers. If a binding may have internal pointers on
/// the stack, and requires identifying the base reference for an internal pointer, they should use
/// [`find_object_from_internal_pointer`] instead.
///
/// Note: This function has special behaviors if the VM space (enabled by the `vm_space` feature)
/// is present. See `crate::plan::global::BasePlan::vm_space`.
///
/// Argument:
/// * `addr`: A non-zero word-aligned address. Because the raw address of an `ObjectReference`
///   cannot be zero and must be word-aligned, the caller must filter out zero and misaligned
///   addresses before calling this function. Otherwise the behavior is undefined.
#[cfg(feature = "vo_bit")]
pub fn is_mmtk_object(addr: Address) -> Option<ObjectReference> {
    crate::util::is_mmtk_object::check_object_reference(addr)
}
697
/// Find if there is an object with VO bit set for the given address range.
/// This should be used instead of [`crate::memory_manager::is_mmtk_object`] for conservative stack scanning if
/// the binding may have internal pointers on the stack.
///
/// Note that, we only consider pointers that point to addresses that are equal to or greater than
/// the raw address of the object's `ObjectReference`, and within the allocation as 'internal
/// pointers'. To be precise, for each object ref `obj_ref`, internal pointers are in the range
/// `[obj_ref.to_raw_address(), obj_ref.to_object_start() +
/// ObjectModel::get_current_size(obj_ref))`. If a binding defines internal pointers differently,
/// calling this method is undefined behavior. If this is the case for you, please submit an issue
/// or engage us on Zulip to discuss more.
///
/// Note that, in the similar situation as [`crate::memory_manager::is_mmtk_object`], the binding should filter
/// out obvious non-pointers (e.g. alignment check, bound check, etc) before calling this function to avoid unnecessary
/// cost. This method is not cheap.
///
/// To minimize the cost, the user should also use a small `max_search_bytes`.
///
/// Note: This function has special behaviors if the VM space (enabled by the `vm_space` feature)
/// is present. See `crate::plan::global::BasePlan::vm_space`.
///
/// Argument:
/// * `internal_ptr`: The address to start searching. We search backwards from this address (including this address) to find the base reference.
/// * `max_search_bytes`: The maximum number of bytes we may search for an object with VO bit set. `internal_ptr - max_search_bytes` is not included.
#[cfg(feature = "vo_bit")]
pub fn find_object_from_internal_pointer(
    internal_ptr: Address,
    max_search_bytes: usize,
) -> Option<ObjectReference> {
    crate::util::is_mmtk_object::check_internal_reference(internal_ptr, max_search_bytes)
}
729
730/// Return true if the `object` lies in a region of memory where
731/// - only MMTk can allocate into, or
732/// - only MMTk's delegated memory allocator (such as a malloc implementation) can allocate into
733/// for allocation requests from MMTk.
734///
735/// Return false otherwise. This function never panics.
736///
737/// Particularly, if this function returns true, `object` cannot be an object allocated by the VM
738/// itself.
739///
740/// If this function returns true, the object cannot be allocate by the `malloc` function called by
741/// the VM, either. In other words, if the `MallocSpace` of MMTk called `malloc` to allocate the
742/// object for the VM in response to `memory_manager::alloc`, this function will return true; but
743/// if the VM directly called `malloc` to allocate the object, this function will return false.
744///
745/// If `is_mmtk_object(object.to_raw_address())` returns true, `is_in_mmtk_spaces(object)` must also
746/// return true.
747///
748/// This function is useful if an object reference in the VM can be either a pointer into the MMTk
749/// heap, or a pointer to non-MMTk objects. If the VM has a pre-built boot image that contains
750/// primordial objects, or if the VM has its own allocator or uses any third-party allocators, or
751/// if the VM allows an object reference to point to native objects such as C++ objects, this
752/// function can distinguish between MMTk-allocated objects and other objects.
753///
754/// Note: This function has special behaviors if the VM space (enabled by the `vm_space` feature)
755/// is present. See `crate::plan::global::BasePlan::vm_space`.
756///
757/// Arguments:
758/// * `object`: The object reference to query.
759pub fn is_in_mmtk_spaces(object: ObjectReference) -> bool {
760 use crate::mmtk::SFT_MAP;
761 SFT_MAP
762 .get_checked(object.to_raw_address())
763 .is_in_space(object)
764}
765
766/// Is the address in the mapped memory? The runtime can use this function to check
767/// if an address is mapped by MMTk. Note that this is different than is_in_mmtk_spaces().
768/// For malloc spaces, MMTk does not map those addresses (malloc does the mmap), so
769/// this function will return false, but is_in_mmtk_spaces will return true if the address
770/// is actually a valid object in malloc spaces. To check if an object is in our heap,
771/// the runtime should always use is_in_mmtk_spaces(). This function is_mapped_address()
772/// may get removed at some point.
773///
774/// Arguments:
775/// * `address`: The address to query.
776// TODO: Do we really need this function? Can a runtime always use is_mapped_object()?
777pub fn is_mapped_address(address: Address) -> bool {
778 address.is_mapped()
779}
780
781/// Add a reference to the list of weak references. A binding may
782/// call this either when a weak reference is created, or when a weak reference is traced during GC.
783///
784/// Arguments:
785/// * `mmtk`: A reference to an MMTk instance.
786/// * `reff`: The weak reference to add.
787pub fn add_weak_candidate<VM: VMBinding>(mmtk: &MMTK<VM>, reff: ObjectReference) {
788 mmtk.reference_processors.add_weak_candidate(reff);
789}
790
791/// Add a reference to the list of soft references. A binding may
792/// call this either when a weak reference is created, or when a weak reference is traced during GC.
793///
794/// Arguments:
795/// * `mmtk`: A reference to an MMTk instance.
796/// * `reff`: The soft reference to add.
797pub fn add_soft_candidate<VM: VMBinding>(mmtk: &MMTK<VM>, reff: ObjectReference) {
798 mmtk.reference_processors.add_soft_candidate(reff);
799}
800
801/// Add a reference to the list of phantom references. A binding may
802/// call this either when a weak reference is created, or when a weak reference is traced during GC.
803///
804/// Arguments:
805/// * `mmtk`: A reference to an MMTk instance.
806/// * `reff`: The phantom reference to add.
807pub fn add_phantom_candidate<VM: VMBinding>(mmtk: &MMTK<VM>, reff: ObjectReference) {
808 mmtk.reference_processors.add_phantom_candidate(reff);
809}
810
811/// Generic hook to allow benchmarks to be harnessed. We do a full heap
812/// GC, and then start recording statistics for MMTk.
813///
814/// Arguments:
815/// * `mmtk`: A reference to an MMTk instance.
816/// * `tls`: The thread that calls the function (and triggers a collection).
817pub fn harness_begin<VM: VMBinding>(mmtk: &MMTK<VM>, tls: VMMutatorThread) {
818 mmtk.harness_begin(tls);
819}
820
821/// Generic hook to allow benchmarks to be harnessed. We stop collecting
822/// statistics, and print stats values.
823///
824/// Arguments:
825/// * `mmtk`: A reference to an MMTk instance.
826pub fn harness_end<VM: VMBinding>(mmtk: &'static MMTK<VM>) {
827 mmtk.harness_end();
828}
829
830/// Register a finalizable object. MMTk will retain the liveness of
831/// the object even if it is not reachable from the program.
832/// Note that finalization upon exit is not supported.
833///
834/// Arguments:
835/// * `mmtk`: A reference to an MMTk instance
836/// * `object`: The object that has a finalizer
837pub fn add_finalizer<VM: VMBinding>(
838 mmtk: &'static MMTK<VM>,
839 object: <VM::VMReferenceGlue as ReferenceGlue<VM>>::FinalizableType,
840) {
841 if *mmtk.options.no_finalizer {
842 warn!("add_finalizer() is called when no_finalizer = true");
843 }
844
845 mmtk.finalizable_processor.lock().unwrap().add(object);
846}
847
/// Pin an object. MMTk will make sure that the object does not move
/// during GC. Note that this action cannot happen in some plans, e.g., semispace.
/// It returns true if the pinning operation has been performed, i.e.,
/// the object status changed from non-pinned to pinned.
///
/// Arguments:
/// * `object`: The object to be pinned
#[cfg(feature = "object_pinning")]
pub fn pin_object(object: ObjectReference) -> bool {
    use crate::mmtk::SFT_MAP;
    // Resolve the space for this address and ask it to pin the object.
    let sft = SFT_MAP.get_checked(object.to_raw_address());
    sft.pin_object(object)
}
862
/// Unpin an object.
/// Returns true if the unpinning operation has been performed, i.e.,
/// the object status changed from pinned to non-pinned
///
/// Arguments:
/// * `object`: The object to be unpinned
#[cfg(feature = "object_pinning")]
pub fn unpin_object(object: ObjectReference) -> bool {
    use crate::mmtk::SFT_MAP;
    SFT_MAP
        .get_checked(object.to_raw_address())
        .unpin_object(object)
}
876
/// Check whether an object is currently pinned.
///
/// Arguments:
/// * `object`: The object to be checked
#[cfg(feature = "object_pinning")]
pub fn is_pinned(object: ObjectReference) -> bool {
    use crate::mmtk::SFT_MAP;
    // Resolve the space for this address and query its pinned status.
    let sft = SFT_MAP.get_checked(object.to_raw_address());
    sft.is_object_pinned(object)
}
888
889/// Get an object that is ready for finalization. After each GC, if any registered object is not
890/// alive, this call will return one of the objects. MMTk will retain the liveness of those objects
891/// until they are popped through this call. Once an object is popped, it is the responsibility of
892/// the VM to make sure they are properly finalized before reclaimed by the GC. This call is non-blocking,
893/// and will return None if no object is ready for finalization.
894///
895/// Arguments:
896/// * `mmtk`: A reference to an MMTk instance.
897pub fn get_finalized_object<VM: VMBinding>(
898 mmtk: &'static MMTK<VM>,
899) -> Option<<VM::VMReferenceGlue as ReferenceGlue<VM>>::FinalizableType> {
900 if *mmtk.options.no_finalizer {
901 warn!("get_finalized_object() is called when no_finalizer = true");
902 }
903
904 mmtk.finalizable_processor
905 .lock()
906 .unwrap()
907 .get_ready_object()
908}
909
910/// Pop all the finalizers that were registered for finalization. The returned objects may or may not be ready for
911/// finalization. After this call, MMTk's finalizer processor should have no registered finalizer any more.
912///
913/// This is useful for some VMs which require all finalizable objects to be finalized on exit.
914///
915/// Arguments:
916/// * `mmtk`: A reference to an MMTk instance.
917pub fn get_all_finalizers<VM: VMBinding>(
918 mmtk: &'static MMTK<VM>,
919) -> Vec<<VM::VMReferenceGlue as ReferenceGlue<VM>>::FinalizableType> {
920 if *mmtk.options.no_finalizer {
921 warn!("get_all_finalizers() is called when no_finalizer = true");
922 }
923
924 mmtk.finalizable_processor
925 .lock()
926 .unwrap()
927 .get_all_finalizers()
928}
929
930/// Pop finalizers that were registered and associated with a certain object. The returned objects may or may not be ready for finalization.
931/// This is useful for some VMs that may manually execute finalize method for an object.
932///
933/// Arguments:
934/// * `mmtk`: A reference to an MMTk instance.
935/// * `object`: the given object that MMTk will pop its finalizers
936pub fn get_finalizers_for<VM: VMBinding>(
937 mmtk: &'static MMTK<VM>,
938 object: ObjectReference,
939) -> Vec<<VM::VMReferenceGlue as ReferenceGlue<VM>>::FinalizableType> {
940 if *mmtk.options.no_finalizer {
941 warn!("get_finalizers() is called when no_finalizer = true");
942 }
943
944 mmtk.finalizable_processor
945 .lock()
946 .unwrap()
947 .get_finalizers_for(object)
948}
949
950/// Get the number of workers. MMTk spawns worker threads for the 'threads' defined in the options.
951/// So the number of workers is derived from the threads option. Note the feature single_worker overwrites
952/// the threads option, and force one worker thread.
953///
954/// Arguments:
955/// * `mmtk`: A reference to an MMTk instance.
956pub fn num_of_workers<VM: VMBinding>(mmtk: &'static MMTK<VM>) -> usize {
957 mmtk.scheduler.num_workers()
958}
959
960/// Add a work packet to the given work bucket. Note that this simply adds the work packet to the given
961/// work bucket, and the scheduler will decide when to execute the work packet.
962///
963/// Arguments:
964/// * `mmtk`: A reference to an MMTk instance.
965/// * `bucket`: Which work bucket to add this packet to.
966/// * `packet`: The work packet to be added.
967pub fn add_work_packet<VM: VMBinding, W: GCWork<VM>>(
968 mmtk: &'static MMTK<VM>,
969 bucket: WorkBucketStage,
970 packet: W,
971) {
972 mmtk.scheduler.work_buckets[bucket].add(packet)
973}
974
975/// Bulk add a number of work packets to the given work bucket. Note that this simply adds the work packets
976/// to the given work bucket, and the scheduler will decide when to execute the work packets.
977///
978/// Arguments:
979/// * `mmtk`: A reference to an MMTk instance.
980/// * `bucket`: Which work bucket to add these packets to.
981/// * `packet`: The work packets to be added.
982pub fn add_work_packets<VM: VMBinding>(
983 mmtk: &'static MMTK<VM>,
984 bucket: WorkBucketStage,
985 packets: Vec<Box<dyn GCWork<VM>>>,
986) {
987 mmtk.scheduler.work_buckets[bucket].bulk_add(packets)
988}