mmtk/
memory_manager.rs

1//! VM-to-MMTk interface: safe Rust APIs.
2//!
3//! This module provides a safe Rust API for mmtk-core.
4//! We expect the VM binding to inherit and extend this API by:
5//! 1. adding their VM-specific functions
6//! 2. exposing the functions to native if necessary. And the VM binding needs to manage the unsafety
7//!    for exposing this safe API to FFI.
8//!
9//! For example, for mutators, this API provides a `Box<Mutator>`, and requires a `&mut Mutator` for allocation.
10//! A VM binding can borrow a mutable reference directly from `Box<Mutator>`, and call `alloc()`. Alternatively,
11//! it can turn the `Box` pointer to a native pointer (`*mut Mutator`), and forge a mut reference from the native
12//! pointer. Either way, the VM binding code needs to guarantee the safety.
13
14use crate::mmtk::MMTKBuilder;
15use crate::mmtk::MMTK;
16use crate::plan::AllocationSemantics;
17use crate::plan::{Mutator, MutatorContext};
18use crate::scheduler::WorkBucketStage;
19use crate::scheduler::{GCWork, GCWorker};
20use crate::util::alloc::allocator::AllocationOptions;
21use crate::util::alloc::allocators::AllocatorSelector;
22use crate::util::constants::LOG_BYTES_IN_PAGE;
23use crate::util::heap::layout::vm_layout::vm_layout;
24use crate::util::opaque_pointer::*;
25use crate::util::{Address, ObjectReference};
26use crate::vm::slot::MemorySlice;
27use crate::vm::ReferenceGlue;
28use crate::vm::VMBinding;
29
30use std::collections::HashMap;
31
32/// Initialize an MMTk instance. A VM should call this method after creating an [`crate::MMTK`]
33/// instance but before using any of the methods provided in MMTk (except `process()` and `process_bulk()`).
34///
35/// We expect a binding to ininitialize MMTk in the following steps:
36///
37/// 1. Create an [`crate::MMTKBuilder`] instance.
38/// 2. Set command line options for MMTKBuilder by [`crate::memory_manager::process`] or [`crate::memory_manager::process_bulk`].
39/// 3. Initialize MMTk by calling this function, `mmtk_init()`, and pass the builder earlier. This call will return an MMTK instance.
40///    Usually a binding store the MMTK instance statically as a singleton. We plan to allow multiple instances, but this is not yet fully
41///    supported. Currently we assume a binding will only need one MMTk instance. Note that GC is enabled by default and the binding should
42///    implement `VMCollection::is_collection_enabled()` if it requires that the GC should be disabled at a particular time.
43///
44/// This method will attempt to initialize the built-in `env_logger` if the Cargo feature "builtin_env_logger" is enabled (by default).
45/// If the VM would like to use its own logger, it should disable the default feature "builtin_env_logger" in `Cargo.toml`.
46///
47/// Note that, to allow MMTk to do GC properly, `initialize_collection()` needs to be called after this call when
48/// the VM's thread system is ready to spawn GC workers.
49///
50/// Note that this method returns a boxed pointer of MMTK, which means MMTk has a bound lifetime with the box pointer. However, some of our current APIs assume
51/// that MMTk has a static lifetime, which presents a mismatch with this API. We plan to address the lifetime issue in the future. At this point, we recommend a binding
52/// to 'expand' the lifetime for the boxed pointer to static. There could be multiple ways to achieve it: 1. `Box::leak()` will turn the box pointer to raw pointer
53/// which has static lifetime, 2. create MMTK as a lazily initialized static variable
54/// (see [what we do for our dummy binding](https://github.com/mmtk/mmtk-core/blob/master/vmbindings/dummyvm/src/lib.rs#L42))
55///
56/// Arguments:
57/// * `builder`: The reference to a MMTk builder.
58pub fn mmtk_init<VM: VMBinding>(builder: &MMTKBuilder) -> Box<MMTK<VM>> {
59    crate::util::logger::try_init();
60
61    #[cfg(all(feature = "perf_counter", target_os = "linux"))]
62    {
63        use std::fs::File;
64        use std::io::Read;
65        let mut status = File::open("/proc/self/status").unwrap();
66        let mut contents = String::new();
67        status.read_to_string(&mut contents).unwrap();
68        for line in contents.lines() {
69            let split: Vec<&str> = line.split('\t').collect();
70            if split[0] == "Threads:" {
71                let threads = split[1].parse::<i32>().unwrap();
72                if threads != 1 {
73                    warn!("Current process has {} threads, process-wide perf event measurement will only include child threads spawned from this thread", threads);
74                }
75            }
76        }
77    }
78
79    let mmtk = builder.build();
80    info!(
81        "Initialized MMTk with {:?} ({:?})",
82        *mmtk.options.plan, *mmtk.options.gc_trigger
83    );
84    #[cfg(feature = "extreme_assertions")]
85    warn!("The feature 'extreme_assertions' is enabled. MMTk will run expensive run-time checks. Slow performance should be expected.");
86    Box::new(mmtk)
87}
88
/// Add an externally mmapped region to the VM space. A VM space can be set through MMTk options (`vm_space_start` and `vm_space_size`),
/// and can also be set through this function call. A VM space can be discontiguous. This function can be called multiple times,
/// and all the address ranges passed as arguments in the function will be considered as part of the VM space.
/// Currently we do not allow removing regions from VM space.
///
/// Arguments:
/// * `mmtk`: A mutable reference to an MMTk instance.
/// * `start`: The start address of the externally mmapped region.
/// * `size`: The size of the region in bytes.
#[cfg(feature = "vm_space")]
pub fn set_vm_space<VM: VMBinding>(mmtk: &'static mut MMTK<VM>, start: Address, size: usize) {
    // NOTE(review): `get_plan_mut` is unsafe; exclusivity is presumably guaranteed by
    // the `&mut MMTK` receiver here — confirm against `get_plan_mut`'s safety contract.
    unsafe { mmtk.get_plan_mut() }
        .base_mut()
        .vm_space
        .set_vm_region(start, size);
}
100
101/// Request MMTk to create a mutator for the given thread. The ownership
102/// of returned boxed mutator is transferred to the binding, and the binding needs to take care of its
103/// lifetime. For performance reasons, A VM should store the returned mutator in a thread local storage
104/// that can be accessed efficiently. A VM may also copy and embed the mutator stucture to a thread-local data
105/// structure, and use that as a reference to the mutator (it is okay to drop the box once the content is copied --
106/// Note that `Mutator` may contain pointers so a binding may drop the box only if they perform a deep copy).
107///
108/// MMTk generally does not expect the runtime to create or destroy mutators during a pause. See also [`crate::vm::ActivePlan::mutators`].
109///
110/// Arguments:
111/// * `mmtk`: A reference to an MMTk instance.
112/// * `tls`: The thread that will be associated with the mutator.
113pub fn bind_mutator<VM: VMBinding>(
114    mmtk: &'static MMTK<VM>,
115    tls: VMMutatorThread,
116) -> Box<Mutator<VM>> {
117    let mutator = crate::plan::create_mutator(tls, mmtk);
118
119    const LOG_ALLOCATOR_MAPPING: bool = false;
120    if LOG_ALLOCATOR_MAPPING {
121        info!("{:?}", mutator.config);
122    }
123    mutator
124}
125
/// Report to MMTk that a mutator is no longer needed. All mutator state is flushed before it is
/// destroyed. A binding should not attempt to use the mutator after this call. MMTk will not
/// attempt to reclaim the memory for the mutator, so a binding should properly reclaim the memory
/// for the mutator after this call.
///
/// MMTk generally does not expect the runtime to create or destroy mutators during a pause. See also [`crate::vm::ActivePlan::mutators`].
///
/// Arguments:
/// * `mutator`: A reference to the mutator to be destroyed.
pub fn destroy_mutator<VM: VMBinding>(mutator: &mut Mutator<VM>) {
    // Flush thread-local state back to the global state first; `on_destroy` must
    // run after the flush, so keep these two calls in this order.
    mutator.flush();
    mutator.on_destroy();
}
139
/// Flush the mutator's local states.
///
/// Arguments:
/// * `mutator`: A reference to the mutator.
pub fn flush_mutator<VM: VMBinding>(mutator: &mut Mutator<VM>) {
    // Thin wrapper: delegates directly to `MutatorContext::flush`.
    mutator.flush()
}
147
/// Allocate memory for an object.
///
/// When the allocation is successful, it returns the starting address of the new object.  The
/// memory range for the new object is `size` bytes starting from the returned address, and
/// `RETURNED_ADDRESS + offset` is guaranteed to be aligned to the `align` parameter.  The returned
/// address of a successful allocation will never be zero.
///
/// If MMTk fails to allocate memory, it will attempt a GC to free up some memory and retry the
/// allocation.  After triggering GC, it will call [`crate::vm::Collection::block_for_gc`] to suspend
/// the current thread that is allocating. Callers of `alloc` must be aware of this behavior.
/// For example, JIT compilers that support
/// precise stack scanning need to make the call site of `alloc` a GC-safe point by generating stack maps. See
/// [`alloc_with_options`] if it is undesirable to trigger GC at this allocation site.
///
/// If MMTk has attempted at least one GC, and still cannot free up enough memory, it will call
/// [`crate::vm::Collection::out_of_memory`] to inform the binding. The VM binding
/// can implement that method to handle the out-of-memory event in a VM-specific way, including but
/// not limited to throwing exceptions or errors. If [`crate::vm::Collection::out_of_memory`] returns
/// normally without panicking or throwing exceptions, this function will return zero.
///
/// For performance reasons, a VM should implement the allocation fast-path on their side rather
/// than just calling this function.
///
/// Arguments:
/// * `mutator`: The mutator to perform this allocation request.
/// * `size`: The number of bytes required for the object.
/// * `align`: Required alignment for the object.
/// * `offset`: Offset associated with the alignment.
/// * `semantics`: The allocation semantic required for the allocation.
pub fn alloc<VM: VMBinding>(
    mutator: &mut Mutator<VM>,
    size: usize,
    align: usize,
    offset: usize,
    semantics: AllocationSemantics,
) -> Address {
    // Debug builds validate the size/align/offset combination up front; release
    // builds skip the check for performance.
    #[cfg(debug_assertions)]
    crate::util::alloc::allocator::assert_allocation_args::<VM>(size, align, offset);

    mutator.alloc(size, align, offset, semantics)
}
189
190/// Allocate memory for an object.
191///
192/// This allocation function allows alternation to the allocation behaviors, specified by the
193/// [`crate::util::alloc::AllocationOptions`]. For example, one can allow
194/// overcommit the memory to go beyond the heap size without triggering a GC. This function can be
195/// used in certain cases where the runtime needs a different allocation behavior other than
196/// what the default [`alloc`] provides.
197///
198/// Arguments:
199/// * `mutator`: The mutator to perform this allocation request.
200/// * `size`: The number of bytes required for the object.
201/// * `align`: Required alignment for the object.
202/// * `offset`: Offset associated with the alignment.
203/// * `semantics`: The allocation semantic required for the allocation.
204/// * `options`: the allocation options to change the default allocation behavior for this request.
205pub fn alloc_with_options<VM: VMBinding>(
206    mutator: &mut Mutator<VM>,
207    size: usize,
208    align: usize,
209    offset: usize,
210    semantics: AllocationSemantics,
211    options: crate::util::alloc::allocator::AllocationOptions,
212) -> Address {
213    #[cfg(debug_assertions)]
214    crate::util::alloc::allocator::assert_allocation_args::<VM>(size, align, offset);
215
216    mutator.alloc_with_options(size, align, offset, semantics, options)
217}
218
/// Invoke the allocation slow path of [`alloc`].
/// Like [`alloc`], this function may trigger GC and call [`crate::vm::Collection::block_for_gc`] or
/// [`crate::vm::Collection::out_of_memory`].  The caller needs to be aware of that.
///
/// *Notes*: This is only intended for use when a binding implements the fastpath on
/// the binding side. When the binding handles fast path allocation and the fast path fails, it can use this
/// method for slow path allocation. Calling before exhausting the fast path allocation buffer will lead to bad
/// performance.
///
/// Arguments:
/// * `mutator`: The mutator to perform this allocation request.
/// * `size`: The number of bytes required for the object.
/// * `align`: Required alignment for the object.
/// * `offset`: Offset associated with the alignment.
/// * `semantics`: The allocation semantic required for the allocation.
pub fn alloc_slow<VM: VMBinding>(
    mutator: &mut Mutator<VM>,
    size: usize,
    align: usize,
    offset: usize,
    semantics: AllocationSemantics,
) -> Address {
    // No argument validation here: the binding's fast path is expected to have
    // already produced valid arguments.
    mutator.alloc_slow(size, align, offset, semantics)
}
243
/// Invoke the allocation slow path of [`alloc_with_options`].
///
/// Like [`alloc_with_options`], This allocation function allows alternation to the allocation behaviors, specified by the
/// [`crate::util::alloc::AllocationOptions`]. For example, one can allow
/// overcommit the memory to go beyond the heap size without triggering a GC. This function can be
/// used in certain cases where the runtime needs a different allocation behavior other than
/// what the default [`alloc`] provides.
///
/// Like [`alloc_slow`], this function is also only intended for use when a binding implements the
/// fastpath on the binding side.
///
/// Arguments:
/// * `mutator`: The mutator to perform this allocation request.
/// * `size`: The number of bytes required for the object.
/// * `align`: Required alignment for the object.
/// * `offset`: Offset associated with the alignment.
/// * `semantics`: The allocation semantic required for the allocation.
/// * `options`: the allocation options to change the default allocation behavior for this request.
pub fn alloc_slow_with_options<VM: VMBinding>(
    mutator: &mut Mutator<VM>,
    size: usize,
    align: usize,
    offset: usize,
    semantics: AllocationSemantics,
    options: AllocationOptions,
) -> Address {
    mutator.alloc_slow_with_options(size, align, offset, semantics, options)
}
271
/// Perform post-allocation actions, usually initializing object metadata. For many allocators none are
/// required. For performance reasons, a VM should implement the post alloc fast-path on their side
/// rather than just calling this function.
///
/// Arguments:
/// * `mutator`: The mutator to perform post-alloc actions.
/// * `refer`: The newly allocated object.
/// * `bytes`: The size of the space allocated for the object (in bytes).
/// * `semantics`: The allocation semantics used for the allocation.
pub fn post_alloc<VM: VMBinding>(
    mutator: &mut Mutator<VM>,
    refer: ObjectReference,
    bytes: usize,
    semantics: AllocationSemantics,
) {
    // Thin wrapper: delegates to the mutator's plan-specific post_alloc hook.
    mutator.post_alloc(refer, bytes, semantics);
}
289
/// The *subsuming* write barrier by MMTk. For performance reasons, a VM should implement the write barrier
/// fast-path on their side rather than just calling this function.
///
/// For a correct barrier implementation, a VM binding needs to choose one of the following options:
/// * Use subsuming barrier `object_reference_write`
/// * Use `object_reference_write_pre` and/or `object_reference_write_post`, if the binding has difficulty delegating the store to mmtk-core with the subsuming barrier.
/// * Implement fast-path on the VM side, and call the generic api `object_reference_write_slow` as barrier slow-path call.
/// * Implement fast-path on the VM side, and do a specialized slow-path call.
///
/// Arguments:
/// * `mutator`: The mutator for the current thread.
/// * `src`: The modified source object.
/// * `slot`: The location of the field to be modified.
/// * `target`: The target for the write operation.
///
/// # Deprecated
///
/// This function needs to be redesigned.  Its current form has multiple issues.
///
/// -   It is only able to write non-null object references into the slot.  But dynamic language
///     VMs may write non-reference values, such as tagged small integers, special values such as
///     `null`, `undefined`, `true`, `false`, etc. into a field that previous contains an object
///     reference.
/// -   It relies on `slot.store` to write `target` into the slot, but `slot.store` is designed for
///     forwarding references when an object is moved by GC, and is supposed to preserve tagged
///     type information, the offset (if it is an interior pointer), etc.  A write barrier is
///     associated to an assignment operation, which usually updates such information instead.
///
/// We will redesign a more general subsuming write barrier to address those problems and replace
/// the current `object_reference_write`.  Before that happens, VM bindings should use
/// `object_reference_write_pre` and `object_reference_write_post` instead.
#[deprecated = "Use `object_reference_write_pre` and `object_reference_write_post` instead, until this function is redesigned"]
pub fn object_reference_write<VM: VMBinding>(
    mutator: &mut Mutator<VM>,
    src: ObjectReference,
    slot: VM::VMSlot,
    target: ObjectReference,
) {
    // Subsuming barrier: the barrier itself performs the store via `slot.store`
    // (see the deprecation notes above for why this is problematic).
    mutator.barrier().object_reference_write(src, slot, target);
}
330
331/// The write barrier by MMTk. This is a *pre* write barrier, which we expect a binding to call
332/// *before* it modifies an object. For performance reasons, a VM should implement the write barrier
333/// fast-path on their side rather than just calling this function.
334///
335/// For a correct barrier implementation, a VM binding needs to choose one of the following options:
336/// * Use subsuming barrier `object_reference_write`
337/// * Use both `object_reference_write_pre` and `object_reference_write_post`, or both, if the binding has difficulty delegating the store to mmtk-core with the subsuming barrier.
338/// * Implement fast-path on the VM side, and call the generic api `object_reference_write_slow` as barrier slow-path call.
339/// * Implement fast-path on the VM side, and do a specialized slow-path call.
340///
341/// Arguments:
342/// * `mutator`: The mutator for the current thread.
343/// * `src`: The modified source object.
344/// * `slot`: The location of the field to be modified.
345/// * `target`: The target for the write operation.  `None` if the slot did not hold an object
346///   reference before the write operation.  For example, the slot may be holding a `null`
347///   reference, a small integer, or special values such as `true`, `false`, `undefined`, etc.
348pub fn object_reference_write_pre<VM: VMBinding>(
349    mutator: &mut Mutator<VM>,
350    src: ObjectReference,
351    slot: VM::VMSlot,
352    target: Option<ObjectReference>,
353) {
354    mutator
355        .barrier()
356        .object_reference_write_pre(src, slot, target);
357}
358
359/// The write barrier by MMTk. This is a *post* write barrier, which we expect a binding to call
360/// *after* it modifies an object. For performance reasons, a VM should implement the write barrier
361/// fast-path on their side rather than just calling this function.
362///
363/// For a correct barrier implementation, a VM binding needs to choose one of the following options:
364/// * Use subsuming barrier `object_reference_write`
365/// * Use both `object_reference_write_pre` and `object_reference_write_post`, or both, if the binding has difficulty delegating the store to mmtk-core with the subsuming barrier.
366/// * Implement fast-path on the VM side, and call the generic api `object_reference_write_slow` as barrier slow-path call.
367/// * Implement fast-path on the VM side, and do a specialized slow-path call.
368///
369/// Arguments:
370/// * `mutator`: The mutator for the current thread.
371/// * `src`: The modified source object.
372/// * `slot`: The location of the field to be modified.
373/// * `target`: The target for the write operation.  `None` if the slot no longer hold an object
374///   reference after the write operation.  This may happen when writing a `null` reference, a small
375///   integers, or a special value such as`true`, `false`, `undefined`, etc., into the slot.
376pub fn object_reference_write_post<VM: VMBinding>(
377    mutator: &mut Mutator<VM>,
378    src: ObjectReference,
379    slot: VM::VMSlot,
380    target: Option<ObjectReference>,
381) {
382    mutator
383        .barrier()
384        .object_reference_write_post(src, slot, target);
385}
386
/// The *subsuming* memory region copy barrier by MMTk.
/// This is called when the VM tries to copy a piece of heap memory to another.
/// The data within the slice does not necessarily have to be all valid pointers,
/// but the VM binding will be able to filter out non-reference values on slot iteration.
///
/// For VMs that performs a heap memory copy operation, for example OpenJDK's array copy operation, the binding needs to
/// call `memory_region_copy*` APIs. Same as `object_reference_write*`, the binding can choose either the subsuming barrier,
/// or the pre/post barrier.
///
/// Arguments:
/// * `mutator`: The mutator for the current thread.
/// * `src`: Source memory slice to copy from.
/// * `dst`: Destination memory slice to copy to.
///
/// The size of `src` and `dst` should be equal.
pub fn memory_region_copy<VM: VMBinding>(
    // NOTE(review): this takes `&'static mut Mutator` while `object_reference_write*`
    // take `&mut Mutator` — confirm whether the 'static bound is actually required.
    mutator: &'static mut Mutator<VM>,
    src: VM::VMMemorySlice,
    dst: VM::VMMemorySlice,
) {
    // The barrier assumes equally sized slices; check in debug builds.
    debug_assert_eq!(src.bytes(), dst.bytes());
    mutator.barrier().memory_region_copy(src, dst);
}
410
/// The *generic* memory region copy *pre* barrier by MMTk, which we expect a binding to call
/// *before* it performs memory copy.
/// This is called when the VM tries to copy a piece of heap memory to another.
/// The data within the slice does not necessarily have to be all valid pointers,
/// but the VM binding will be able to filter out non-reference values on slot iteration.
///
/// For VMs that performs a heap memory copy operation, for example OpenJDK's array copy operation, the binding needs to
/// call `memory_region_copy*` APIs. Same as `object_reference_write*`, the binding can choose either the subsuming barrier,
/// or the pre/post barrier.
///
/// Arguments:
/// * `mutator`: The mutator for the current thread.
/// * `src`: Source memory slice to copy from.
/// * `dst`: Destination memory slice to copy to.
///
/// The size of `src` and `dst` should be equal.
pub fn memory_region_copy_pre<VM: VMBinding>(
    // NOTE(review): `&'static mut` — see the note on `memory_region_copy`.
    mutator: &'static mut Mutator<VM>,
    src: VM::VMMemorySlice,
    dst: VM::VMMemorySlice,
) {
    // The barrier assumes equally sized slices; check in debug builds.
    debug_assert_eq!(src.bytes(), dst.bytes());
    mutator.barrier().memory_region_copy_pre(src, dst);
}
435
/// The *generic* memory region copy *post* barrier by MMTk, which we expect a binding to call
/// *after* it performs memory copy.
/// This is called when the VM tries to copy a piece of heap memory to another.
/// The data within the slice does not necessarily have to be all valid pointers,
/// but the VM binding will be able to filter out non-reference values on slot iteration.
///
/// For VMs that performs a heap memory copy operation, for example OpenJDK's array copy operation, the binding needs to
/// call `memory_region_copy*` APIs. Same as `object_reference_write*`, the binding can choose either the subsuming barrier,
/// or the pre/post barrier.
///
/// Arguments:
/// * `mutator`: The mutator for the current thread.
/// * `src`: Source memory slice to copy from.
/// * `dst`: Destination memory slice to copy to.
///
/// The size of `src` and `dst` should be equal.
pub fn memory_region_copy_post<VM: VMBinding>(
    // NOTE(review): `&'static mut` — see the note on `memory_region_copy`.
    mutator: &'static mut Mutator<VM>,
    src: VM::VMMemorySlice,
    dst: VM::VMMemorySlice,
) {
    // The barrier assumes equally sized slices; check in debug builds.
    debug_assert_eq!(src.bytes(), dst.bytes());
    mutator.barrier().memory_region_copy_post(src, dst);
}
460
/// Return an AllocatorSelector for the given allocation semantic. This method is provided
/// so that VM compilers may call it to help generate allocation fast-path.
///
/// Arguments:
/// * `mmtk`: The reference to an MMTk instance.
/// * `semantics`: The allocation semantic to query.
pub fn get_allocator_mapping<VM: VMBinding>(
    mmtk: &MMTK<VM>,
    semantics: AllocationSemantics,
) -> AllocatorSelector {
    // The plan maintains a per-semantics allocator table; index it by semantics.
    mmtk.get_plan().get_allocator_mapping()[semantics]
}
473
/// The standard malloc. MMTk either uses its own allocator, or forward the call to a
/// library malloc.
///
/// Arguments:
/// * `size`: The number of bytes to allocate.
pub fn malloc(size: usize) -> Address {
    crate::util::malloc::malloc(size)
}
479
/// The standard malloc except that with the feature `malloc_counted_size`, MMTk will count the allocated memory into its heap size.
/// Thus the method requires a reference to an MMTk instance. MMTk either uses its own allocator, or forward the call to a
/// library malloc.
///
/// Arguments:
/// * `mmtk`: A reference to an MMTk instance (used for heap-size accounting).
/// * `size`: The number of bytes to allocate.
#[cfg(feature = "malloc_counted_size")]
pub fn counted_malloc<VM: VMBinding>(mmtk: &MMTK<VM>, size: usize) -> Address {
    crate::util::malloc::counted_malloc(mmtk, size)
}
487
/// The standard calloc.
///
/// Arguments:
/// * `num`: The number of elements to allocate.
/// * `size`: The size of each element in bytes.
pub fn calloc(num: usize, size: usize) -> Address {
    crate::util::malloc::calloc(num, size)
}
492
/// The standard calloc except that with the feature `malloc_counted_size`, MMTk will count the allocated memory into its heap size.
/// Thus the method requires a reference to an MMTk instance.
///
/// Arguments:
/// * `mmtk`: A reference to an MMTk instance (used for heap-size accounting).
/// * `num`: The number of elements to allocate.
/// * `size`: The size of each element in bytes.
#[cfg(feature = "malloc_counted_size")]
pub fn counted_calloc<VM: VMBinding>(mmtk: &MMTK<VM>, num: usize, size: usize) -> Address {
    crate::util::malloc::counted_calloc(mmtk, num, size)
}
499
/// The standard realloc.
///
/// Arguments:
/// * `addr`: An address earlier returned from MMTk's `malloc()`, `calloc()` or `realloc()`.
/// * `size`: The new size in bytes.
pub fn realloc(addr: Address, size: usize) -> Address {
    crate::util::malloc::realloc(addr, size)
}
504
/// The standard realloc except that with the feature `malloc_counted_size`, MMTk will count the allocated memory into its heap size.
/// Thus the method requires a reference to an MMTk instance, and the size of the existing memory that will be reallocated.
/// The `addr` in the arguments must be an address that is earlier returned from MMTk's `malloc()`, `calloc()` or `realloc()`.
///
/// Arguments:
/// * `mmtk`: A reference to an MMTk instance (used for heap-size accounting).
/// * `addr`: The address of the existing allocation.
/// * `size`: The new size in bytes.
/// * `old_size`: The size of the existing allocation in bytes.
#[cfg(feature = "malloc_counted_size")]
pub fn realloc_with_old_size<VM: VMBinding>(
    mmtk: &MMTK<VM>,
    addr: Address,
    size: usize,
    old_size: usize,
) -> Address {
    crate::util::malloc::realloc_with_old_size(mmtk, addr, size, old_size)
}
517
/// The standard free.
/// The `addr` in the arguments must be an address that is earlier returned from MMTk's `malloc()`, `calloc()` or `realloc()`.
///
/// Arguments:
/// * `addr`: The address of the allocation to free.
pub fn free(addr: Address) {
    crate::util::malloc::free(addr)
}
523
/// The standard free except that with the feature `malloc_counted_size`, MMTk will count the allocated memory into its heap size.
/// Thus the method requires a reference to an MMTk instance, and the size of the memory to free.
/// The `addr` in the arguments must be an address that is earlier returned from MMTk's `malloc()`, `calloc()` or `realloc()`.
///
/// Arguments:
/// * `mmtk`: A reference to an MMTk instance (used for heap-size accounting).
/// * `addr`: The address of the allocation to free.
/// * `old_size`: The size of the allocation in bytes.
#[cfg(feature = "malloc_counted_size")]
pub fn free_with_size<VM: VMBinding>(mmtk: &MMTK<VM>, addr: Address, old_size: usize) {
    crate::util::malloc::free_with_size(mmtk, addr, old_size)
}
531
/// Get the current active malloc'd bytes. Here MMTk only accounts for bytes that are done through those 'counted malloc' functions.
///
/// Arguments:
/// * `mmtk`: A reference to an MMTk instance.
#[cfg(feature = "malloc_counted_size")]
pub fn get_malloc_bytes<VM: VMBinding>(mmtk: &MMTK<VM>) -> usize {
    // Atomic load of the counter maintained by the counted-malloc functions.
    mmtk.state
        .malloc_bytes
        .load(std::sync::atomic::Ordering::SeqCst)
}
538
539/// Poll for GC. MMTk will decide if a GC is needed. If so, this call will block
540/// the current thread, and trigger a GC. Otherwise, it will simply return.
541/// Usually a binding does not need to call this function. MMTk will poll for GC during its allocation.
542/// However, if a binding uses counted malloc (which won't poll for GC), they may want to poll for GC manually.
543/// This function should only be used by mutator threads.
544pub fn gc_poll<VM: VMBinding>(mmtk: &MMTK<VM>, tls: VMMutatorThread) {
545    use crate::vm::{ActivePlan, Collection};
546    debug_assert!(
547        VM::VMActivePlan::is_mutator(tls.0),
548        "gc_poll() can only be called by a mutator thread."
549    );
550
551    if mmtk.gc_trigger.poll(false, None) {
552        debug!("Collection required");
553        if !mmtk.state.is_initialized() {
554            panic!("GC is not allowed here: collection is not initialized (did you call initialize_collection()?).");
555        }
556        VM::VMCollection::block_for_gc(tls);
557    }
558}
559
/// Wrapper for [`crate::scheduler::GCWorker::run`].
///
/// Arguments:
/// * `mmtk`: A reference to an MMTk instance.
/// * `tls`: The worker thread calling this function.
/// * `worker`: The boxed GC worker (ownership is consumed by `run`).
pub fn start_worker<VM: VMBinding>(
    mmtk: &'static MMTK<VM>,
    tls: VMWorkerThread,
    worker: Box<GCWorker<VM>>,
) {
    // NOTE(review): `run` presumably occupies this thread as the worker's main loop
    // until shutdown — see `GCWorker::run` for the exact contract.
    worker.run(tls, mmtk);
}
568
/// Wrapper for [`crate::mmtk::MMTK::initialize_collection`].
///
/// Arguments:
/// * `mmtk`: A reference to an MMTk instance.
/// * `tls`: The thread making this call.
pub fn initialize_collection<VM: VMBinding>(mmtk: &'static MMTK<VM>, tls: VMThread) {
    mmtk.initialize_collection(tls);
}
573
/// Process MMTk run-time options. Returns true if the option is processed successfully.
///
/// Arguments:
/// * `builder`: A mutable reference to the MMTk builder on which the option is set.
/// * `name`: The name of the option.
/// * `value`: The value of the option (as a string).
pub fn process(builder: &mut MMTKBuilder, name: &str, value: &str) -> bool {
    builder.set_option(name, value)
}
583
/// Process multiple MMTk run-time options. Returns true if all the options are processed successfully.
///
/// Arguments:
/// * `builder`: A mutable reference to the MMTk builder on which the options are set.
/// * `options`: a string that is key value pairs separated by white spaces, e.g. "threads=1 stress_factor=4096"
pub fn process_bulk(builder: &mut MMTKBuilder, options: &str) -> bool {
    builder.set_options_bulk_by_str(options)
}
592
/// Return used memory in bytes. MMTk accounts for memory in pages, thus this method always returns a value in
/// page granularity.
///
/// Arguments:
/// * `mmtk`: A reference to an MMTk instance.
pub fn used_bytes<VM: VMBinding>(mmtk: &MMTK<VM>) -> usize {
    // Convert page count to bytes.
    mmtk.get_plan().get_used_pages() << LOG_BYTES_IN_PAGE
}
601
/// Return free memory in bytes. MMTk accounts for memory in pages, thus this method always returns a value in
/// page granularity.
///
/// Arguments:
/// * `mmtk`: A reference to an MMTk instance.
pub fn free_bytes<VM: VMBinding>(mmtk: &MMTK<VM>) -> usize {
    // Convert page count to bytes.
    mmtk.get_plan().get_free_pages() << LOG_BYTES_IN_PAGE
}
610
/// Return a hash map for live bytes statistics in the last GC for each space.
///
/// MMTk usually accounts for memory in pages by each space.
/// This is a special method that we count the size of every live object in a GC, and sum up the total bytes.
/// We provide this method so users can use [`crate::LiveBytesStats`] to know if
/// the space is fragmented.
/// The value returned by this method is only updated when we finish tracing in a GC. A recommended timing
/// to call this method is at the end of a GC (e.g. when the runtime is about to resume threads).
pub fn live_bytes_in_last_gc<VM: VMBinding>(
    mmtk: &MMTK<VM>,
) -> HashMap<&'static str, crate::LiveBytesStats> {
    // Clone the map so the caller gets a snapshot independent of the borrowed state.
    mmtk.state.live_bytes_in_last_gc.borrow().clone()
}
624
/// Return the starting address of the heap. *Note that currently MMTk uses
/// a fixed address range as heap.*
pub fn starting_heap_address() -> Address {
    // Read from the global VM layout configuration.
    vm_layout().heap_start
}
630
/// Return the ending address of the heap. *Note that currently MMTk uses
/// a fixed address range as heap.*
pub fn last_heap_address() -> Address {
    // Read from the global VM layout configuration.
    vm_layout().heap_end
}
636
637/// Return the total memory in bytes.
638///
639/// Arguments:
640/// * `mmtk`: A reference to an MMTk instance.
641pub fn total_bytes<VM: VMBinding>(mmtk: &MMTK<VM>) -> usize {
642    mmtk.get_plan().get_total_pages() << LOG_BYTES_IN_PAGE
643}
644
645/// The application code has requested a collection. This is just a GC hint, and
646/// we may ignore it.
647///
648/// Returns whether a GC was ran or not. If MMTk triggers a GC, this method will block the
649/// calling thread and return true when the GC finishes. Otherwise, this method returns
650/// false immediately.
651///
652/// Arguments:
653/// * `mmtk`: A reference to an MMTk instance.
654/// * `tls`: The thread that triggers this collection request.
655pub fn handle_user_collection_request<VM: VMBinding>(
656    mmtk: &MMTK<VM>,
657    tls: VMMutatorThread,
658) -> bool {
659    mmtk.handle_user_collection_request(tls, false, false)
660}
661
662/// Is the object alive?
663///
664/// Arguments:
665/// * `object`: The object reference to query.
666pub fn is_live_object(object: ObjectReference) -> bool {
667    object.is_live()
668}
669
/// Check if `addr` is the raw address of an object reference to an MMTk object.
///
/// Concretely:
/// 1.  Return `Some(object)` if `ObjectReference::from_raw_address(addr)` is a valid object
///     reference to an object in any space in MMTk, where `object` is the result of
///     `ObjectReference::from_raw_address(addr)`.
/// 2.  Return `None` otherwise.
///
/// This function is useful for conservative root scanning. The VM can iterate through all words
/// in a stack, filter out zeros, misaligned words, and obviously out-of-range words (such as
/// addresses greater than `0x0000_7fff_ffff_ffff` on Linux on x86_64), and then use this
/// function to decide whether a word is really a reference.
///
/// This function does not handle internal pointers. If a binding may have internal pointers on
/// the stack, and needs to identify the base reference for an internal pointer, it should use
/// [`find_object_from_internal_pointer`] instead.
///
/// Note: This function has special behaviors if the VM space (enabled by the `vm_space` feature)
/// is present.  See `crate::plan::global::BasePlan::vm_space`.
///
/// Argument:
/// * `addr`: A non-zero word-aligned address.  Because the raw address of an `ObjectReference`
///   cannot be zero and must be word-aligned, the caller must filter out zero and misaligned
///   addresses before calling this function.  Otherwise the behavior is undefined.
#[cfg(feature = "vo_bit")]
pub fn is_mmtk_object(addr: Address) -> Option<ObjectReference> {
    use crate::util::is_mmtk_object::check_object_reference;
    check_object_reference(addr)
}
698
/// Find if there is an object with the VO bit set for the given address range.
/// This should be used instead of [`crate::memory_manager::is_mmtk_object`] for conservative
/// stack scanning if the binding may have internal pointers on the stack.
///
/// Note that we only consider pointers that point to addresses that are equal to or greater
/// than the raw address of the object's `ObjectReference`, and within the allocation, as
/// 'internal pointers'. To be precise, for each object ref `obj_ref`, internal pointers are in
/// the range `[obj_ref.to_raw_address(), obj_ref.to_object_start() +
/// ObjectModel::get_current_size(obj_ref))`. If a binding defines internal pointers
/// differently, calling this method is undefined behavior. If this is the case for you, please
/// submit an issue or engage us on Zulip to discuss more.
///
/// As with [`crate::memory_manager::is_mmtk_object`], the binding should filter out obvious
/// non-pointers (e.g. via alignment checks, bound checks, etc.) before calling this function to
/// avoid unnecessary cost. This method is not cheap.
///
/// To minimize the cost, the user should also use a small `max_search_bytes`.
///
/// Note: This function has special behaviors if the VM space (enabled by the `vm_space` feature)
/// is present.  See `crate::plan::global::BasePlan::vm_space`.
///
/// Argument:
/// * `internal_ptr`: The address to start searching from. We search backwards from this address
///   (including this address) to find the base reference.
/// * `max_search_bytes`: The maximum number of bytes we may search for an object with the VO
///   bit set. `internal_ptr - max_search_bytes` is not included.
#[cfg(feature = "vo_bit")]
pub fn find_object_from_internal_pointer(
    internal_ptr: Address,
    max_search_bytes: usize,
) -> Option<ObjectReference> {
    use crate::util::is_mmtk_object::check_internal_reference;
    check_internal_reference(internal_ptr, max_search_bytes)
}
730
731/// Return true if the `object` lies in a region of memory where
732/// -   only MMTk can allocate into, or
733/// -   only MMTk's delegated memory allocator (such as a malloc implementation) can allocate into
734///     for allocation requests from MMTk.
735///
736/// Return false otherwise.  This function never panics.
737///
738/// Particularly, if this function returns true, `object` cannot be an object allocated by the VM
739/// itself.
740///
741/// If this function returns true, the object cannot be allocate by the `malloc` function called by
742/// the VM, either. In other words, if the `MallocSpace` of MMTk called `malloc` to allocate the
743/// object for the VM in response to `memory_manager::alloc`, this function will return true; but
744/// if the VM directly called `malloc` to allocate the object, this function will return false.
745///
746/// If `is_mmtk_object(object.to_raw_address())` returns true, `is_in_mmtk_spaces(object)` must also
747/// return true.
748///
749/// This function is useful if an object reference in the VM can be either a pointer into the MMTk
750/// heap, or a pointer to non-MMTk objects.  If the VM has a pre-built boot image that contains
751/// primordial objects, or if the VM has its own allocator or uses any third-party allocators, or
752/// if the VM allows an object reference to point to native objects such as C++ objects, this
753/// function can distinguish between MMTk-allocated objects and other objects.
754///
755/// Note: This function has special behaviors if the VM space (enabled by the `vm_space` feature)
756/// is present.  See `crate::plan::global::BasePlan::vm_space`.
757///
758/// Arguments:
759/// * `object`: The object reference to query.
760pub fn is_in_mmtk_spaces(object: ObjectReference) -> bool {
761    use crate::mmtk::SFT_MAP;
762    SFT_MAP
763        .get_checked(object.to_raw_address())
764        .is_in_space(object)
765}
766
767/// Is the address in the mapped memory? The runtime can use this function to check
768/// if an address is mapped by MMTk. Note that this is different than is_in_mmtk_spaces().
769/// For malloc spaces, MMTk does not map those addresses (malloc does the mmap), so
770/// this function will return false, but is_in_mmtk_spaces will return true if the address
771/// is actually a valid object in malloc spaces. To check if an object is in our heap,
772/// the runtime should always use is_in_mmtk_spaces(). This function is_mapped_address()
773/// may get removed at some point.
774///
775/// Arguments:
776/// * `address`: The address to query.
777// TODO: Do we really need this function? Can a runtime always use is_mapped_object()?
778pub fn is_mapped_address(address: Address) -> bool {
779    address.is_mapped()
780}
781
782/// Add a reference to the list of weak references. A binding may
783/// call this either when a weak reference is created, or when a weak reference is traced during GC.
784///
785/// Arguments:
786/// * `mmtk`: A reference to an MMTk instance.
787/// * `reff`: The weak reference to add.
788pub fn add_weak_candidate<VM: VMBinding>(mmtk: &MMTK<VM>, reff: ObjectReference) {
789    mmtk.reference_processors.add_weak_candidate(reff);
790}
791
792/// Add a reference to the list of soft references. A binding may
793/// call this either when a weak reference is created, or when a weak reference is traced during GC.
794///
795/// Arguments:
796/// * `mmtk`: A reference to an MMTk instance.
797/// * `reff`: The soft reference to add.
798pub fn add_soft_candidate<VM: VMBinding>(mmtk: &MMTK<VM>, reff: ObjectReference) {
799    mmtk.reference_processors.add_soft_candidate(reff);
800}
801
802/// Add a reference to the list of phantom references. A binding may
803/// call this either when a weak reference is created, or when a weak reference is traced during GC.
804///
805/// Arguments:
806/// * `mmtk`: A reference to an MMTk instance.
807/// * `reff`: The phantom reference to add.
808pub fn add_phantom_candidate<VM: VMBinding>(mmtk: &MMTK<VM>, reff: ObjectReference) {
809    mmtk.reference_processors.add_phantom_candidate(reff);
810}
811
812/// Generic hook to allow benchmarks to be harnessed. We do a full heap
813/// GC, and then start recording statistics for MMTk.
814///
815/// Arguments:
816/// * `mmtk`: A reference to an MMTk instance.
817/// * `tls`: The thread that calls the function (and triggers a collection).
818pub fn harness_begin<VM: VMBinding>(mmtk: &MMTK<VM>, tls: VMMutatorThread) {
819    mmtk.harness_begin(tls);
820}
821
822/// Generic hook to allow benchmarks to be harnessed. We stop collecting
823/// statistics, and print stats values.
824///
825/// Arguments:
826/// * `mmtk`: A reference to an MMTk instance.
827pub fn harness_end<VM: VMBinding>(mmtk: &'static MMTK<VM>) {
828    mmtk.harness_end();
829}
830
831/// Register a finalizable object. MMTk will retain the liveness of
832/// the object even if it is not reachable from the program.
833/// Note that finalization upon exit is not supported.
834///
835/// Arguments:
836/// * `mmtk`: A reference to an MMTk instance
837/// * `object`: The object that has a finalizer
838pub fn add_finalizer<VM: VMBinding>(
839    mmtk: &'static MMTK<VM>,
840    object: <VM::VMReferenceGlue as ReferenceGlue<VM>>::FinalizableType,
841) {
842    if *mmtk.options.no_finalizer {
843        warn!("add_finalizer() is called when no_finalizer = true");
844    }
845
846    mmtk.finalizable_processor.lock().unwrap().add(object);
847}
848
/// Pin an object so that MMTk does not move it during GC. Note that this action
/// cannot happen in some plans, e.g. semispace.
/// Returns true if the pinning operation was performed, i.e. the object status
/// changed from non-pinned to pinned.
///
/// Arguments:
/// * `object`: The object to be pinned
#[cfg(feature = "object_pinning")]
pub fn pin_object(object: ObjectReference) -> bool {
    // Dispatch to the space that covers the object's address.
    let sft = crate::mmtk::SFT_MAP.get_checked(object.to_raw_address());
    sft.pin_object(object)
}
863
/// Unpin an object.
/// Returns true if the unpinning operation was performed, i.e. the object status
/// changed from pinned to non-pinned.
///
/// Arguments:
/// * `object`: The object to be unpinned
#[cfg(feature = "object_pinning")]
pub fn unpin_object(object: ObjectReference) -> bool {
    // Dispatch to the space that covers the object's address.
    let sft = crate::mmtk::SFT_MAP.get_checked(object.to_raw_address());
    sft.unpin_object(object)
}
877
/// Check whether an object is currently pinned.
///
/// Arguments:
/// * `object`: The object to be checked
#[cfg(feature = "object_pinning")]
pub fn is_pinned(object: ObjectReference) -> bool {
    // Dispatch to the space that covers the object's address.
    let sft = crate::mmtk::SFT_MAP.get_checked(object.to_raw_address());
    sft.is_object_pinned(object)
}
889
890/// Get an object that is ready for finalization. After each GC, if any registered object is not
891/// alive, this call will return one of the objects. MMTk will retain the liveness of those objects
892/// until they are popped through this call. Once an object is popped, it is the responsibility of
893/// the VM to make sure they are properly finalized before reclaimed by the GC. This call is non-blocking,
894/// and will return None if no object is ready for finalization.
895///
896/// Arguments:
897/// * `mmtk`: A reference to an MMTk instance.
898pub fn get_finalized_object<VM: VMBinding>(
899    mmtk: &'static MMTK<VM>,
900) -> Option<<VM::VMReferenceGlue as ReferenceGlue<VM>>::FinalizableType> {
901    if *mmtk.options.no_finalizer {
902        warn!("get_finalized_object() is called when no_finalizer = true");
903    }
904
905    mmtk.finalizable_processor
906        .lock()
907        .unwrap()
908        .get_ready_object()
909}
910
911/// Pop all the finalizers that were registered for finalization. The returned objects may or may not be ready for
912/// finalization. After this call, MMTk's finalizer processor should have no registered finalizer any more.
913///
914/// This is useful for some VMs which require all finalizable objects to be finalized on exit.
915///
916/// Arguments:
917/// * `mmtk`: A reference to an MMTk instance.
918pub fn get_all_finalizers<VM: VMBinding>(
919    mmtk: &'static MMTK<VM>,
920) -> Vec<<VM::VMReferenceGlue as ReferenceGlue<VM>>::FinalizableType> {
921    if *mmtk.options.no_finalizer {
922        warn!("get_all_finalizers() is called when no_finalizer = true");
923    }
924
925    mmtk.finalizable_processor
926        .lock()
927        .unwrap()
928        .get_all_finalizers()
929}
930
931/// Pop finalizers that were registered and associated with a certain object. The returned objects may or may not be ready for finalization.
932/// This is useful for some VMs that may manually execute finalize method for an object.
933///
934/// Arguments:
935/// * `mmtk`: A reference to an MMTk instance.
936/// * `object`: the given object that MMTk will pop its finalizers
937pub fn get_finalizers_for<VM: VMBinding>(
938    mmtk: &'static MMTK<VM>,
939    object: ObjectReference,
940) -> Vec<<VM::VMReferenceGlue as ReferenceGlue<VM>>::FinalizableType> {
941    if *mmtk.options.no_finalizer {
942        warn!("get_finalizers() is called when no_finalizer = true");
943    }
944
945    mmtk.finalizable_processor
946        .lock()
947        .unwrap()
948        .get_finalizers_for(object)
949}
950
951/// Get the number of workers. MMTk spawns worker threads for the 'threads' defined in the options.
952/// So the number of workers is derived from the threads option. Note the feature single_worker overwrites
953/// the threads option, and force one worker thread.
954///
955/// Arguments:
956/// * `mmtk`: A reference to an MMTk instance.
957pub fn num_of_workers<VM: VMBinding>(mmtk: &'static MMTK<VM>) -> usize {
958    mmtk.scheduler.num_workers()
959}
960
961/// Add a work packet to the given work bucket. Note that this simply adds the work packet to the given
962/// work bucket, and the scheduler will decide when to execute the work packet.
963///
964/// Arguments:
965/// * `mmtk`: A reference to an MMTk instance.
966/// * `bucket`: Which work bucket to add this packet to.
967/// * `packet`: The work packet to be added.
968pub fn add_work_packet<VM: VMBinding, W: GCWork<VM>>(
969    mmtk: &'static MMTK<VM>,
970    bucket: WorkBucketStage,
971    packet: W,
972) {
973    mmtk.scheduler.work_buckets[bucket].add(packet)
974}
975
976/// Bulk add a number of work packets to the given work bucket. Note that this simply adds the work packets
977/// to the given work bucket, and the scheduler will decide when to execute the work packets.
978///
979/// Arguments:
980/// * `mmtk`: A reference to an MMTk instance.
981/// * `bucket`: Which work bucket to add these packets to.
982/// * `packet`: The work packets to be added.
983pub fn add_work_packets<VM: VMBinding>(
984    mmtk: &'static MMTK<VM>,
985    bucket: WorkBucketStage,
986    packets: Vec<Box<dyn GCWork<VM>>>,
987) {
988    mmtk.scheduler.work_buckets[bucket].bulk_add(packets)
989}