mmtk/memory_manager.rs

//! VM-to-MMTk interface: safe Rust APIs.
//!
//! This module provides a safe Rust API for mmtk-core.
//! We expect the VM binding to inherit and extend this API by:
//! 1. adding their VM-specific functions, and
//! 2. exposing the functions to native code if necessary. The VM binding needs to manage the unsafety
//!    of exposing this safe API through FFI.
//!
//! For example, for mutators, this API provides a `Box<Mutator>`, and requires a `&mut Mutator` for allocation.
//! A VM binding can borrow a mutable reference directly from the `Box<Mutator>` and call `alloc()`. Alternatively,
//! it can turn the `Box` into a raw pointer (`*mut Mutator`) and forge a mutable reference from the raw
//! pointer. Either way, the VM binding code needs to guarantee the safety.

use crate::mmtk::MMTKBuilder;
use crate::mmtk::MMTK;
use crate::plan::AllocationSemantics;
use crate::plan::{Mutator, MutatorContext};
use crate::scheduler::WorkBucketStage;
use crate::scheduler::{GCWork, GCWorker};
use crate::util::alloc::allocator::AllocationOptions;
use crate::util::alloc::allocators::AllocatorSelector;
use crate::util::constants::LOG_BYTES_IN_PAGE;
use crate::util::heap::layout::vm_layout::vm_layout;
use crate::util::opaque_pointer::*;
use crate::util::{Address, ObjectReference};
use crate::vm::slot::MemorySlice;
use crate::vm::ReferenceGlue;
use crate::vm::VMBinding;

use std::collections::HashMap;

/// Initialize an MMTk instance. A VM should call this method after creating an [`crate::MMTK`]
/// instance but before using any of the methods provided in MMTk (except `process()` and `process_bulk()`).
///
/// We expect a binding to initialize MMTk in the following steps:
///
/// 1. Create an [`crate::MMTKBuilder`] instance.
/// 2. Set command line options for the builder with [`crate::memory_manager::process`] or [`crate::memory_manager::process_bulk`].
/// 3. Initialize MMTk by calling this function, `mmtk_init()`, and pass the builder created earlier. This call will return an MMTK instance.
///    Usually a binding stores the MMTK instance statically as a singleton. We plan to allow multiple instances, but this is not yet fully
///    supported. Currently we assume a binding will only need one MMTk instance. Note that GC is enabled by default and the binding should
///    implement `VMCollection::is_collection_enabled()` if it requires the GC to be disabled at particular times.
///
/// This method will attempt to initialize the built-in `env_logger` if the Cargo feature "builtin_env_logger" is enabled (which it is by default).
/// If the VM would like to use its own logger, it should disable the default feature "builtin_env_logger" in `Cargo.toml`.
///
/// Note that, to allow MMTk to do GC properly, `initialize_collection()` needs to be called after this call when
/// the VM's thread system is ready to spawn GC workers.
///
/// Note that this method returns a boxed pointer of MMTK, which means the lifetime of MMTk is bound to the box pointer. However, some of our current APIs assume
/// that MMTk has a static lifetime, which presents a mismatch with this API. We plan to address the lifetime issue in the future. At this point, we recommend a binding
/// to 'expand' the lifetime of the boxed pointer to static. There are multiple ways to achieve this: 1. `Box::leak()` will turn the box pointer into a raw pointer
/// with a static lifetime; 2. create the MMTK instance as a lazily initialized static variable
/// (see [what we do for our dummy binding](https://github.com/mmtk/mmtk-core/blob/master/vmbindings/dummyvm/src/lib.rs#L42)).
///
/// Arguments:
/// * `builder`: The reference to a MMTk builder.
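///
/// # Example
///
/// A minimal sketch of the steps above (not compiled here; `MyVM` is a hypothetical type
/// implementing [`crate::vm::VMBinding`]):
///
/// ```ignore
/// use mmtk::{memory_manager, MMTKBuilder, MMTK};
///
/// // Steps 1 and 2: create a builder and set options.
/// let mut builder = MMTKBuilder::new();
/// assert!(memory_manager::process(&mut builder, "threads", "4"));
///
/// // Step 3: build the instance, then 'expand' its lifetime to 'static with Box::leak().
/// let mmtk: &'static MMTK<MyVM> = Box::leak(memory_manager::mmtk_init(&builder));
/// ```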
pub fn mmtk_init<VM: VMBinding>(builder: &MMTKBuilder) -> Box<MMTK<VM>> {
    crate::util::logger::try_init();
    #[cfg(all(feature = "perf_counter", target_os = "linux"))]
    {
        use std::fs::File;
        use std::io::Read;
        let mut status = File::open("/proc/self/status").unwrap();
        let mut contents = String::new();
        status.read_to_string(&mut contents).unwrap();
        for line in contents.lines() {
            let split: Vec<&str> = line.split('\t').collect();
            if split[0] == "Threads:" {
                let threads = split[1].parse::<i32>().unwrap();
                if threads != 1 {
                    warn!("Current process has {} threads, process-wide perf event measurement will only include child threads spawned from this thread", threads);
                }
            }
        }
    }
    let mmtk = builder.build();

    info!(
        "Initialized MMTk with {:?} ({:?})",
        *mmtk.options.plan, *mmtk.options.gc_trigger
    );
    #[cfg(feature = "extreme_assertions")]
    warn!("The feature 'extreme_assertions' is enabled. MMTk will run expensive run-time checks. Slow performance should be expected.");
    Box::new(mmtk)
}

/// Add an externally mmapped region to the VM space. A VM space can be set through MMTk options (`vm_space_start` and `vm_space_size`),
/// and can also be set through this function call. A VM space can be discontiguous. This function can be called multiple times,
/// and all the address ranges passed to this function will be considered part of the VM space.
/// Currently we do not allow removing regions from the VM space.
#[cfg(feature = "vm_space")]
pub fn set_vm_space<VM: VMBinding>(mmtk: &'static mut MMTK<VM>, start: Address, size: usize) {
    unsafe { mmtk.get_plan_mut() }
        .base_mut()
        .vm_space
        .set_vm_region(start, size);
}

/// Request MMTk to create a mutator for the given thread. The ownership
/// of the returned boxed mutator is transferred to the binding, and the binding needs to manage its
/// lifetime. For performance reasons, a VM should store the returned mutator in thread-local storage
/// that can be accessed efficiently. A VM may also copy and embed the mutator structure into a thread-local data
/// structure, and use that as a reference to the mutator (it is okay to drop the box once the content is copied --
/// note that `Mutator` may contain pointers, so a binding may drop the box only if it performs a deep copy).
///
/// Arguments:
/// * `mmtk`: A reference to an MMTk instance.
/// * `tls`: The thread that will be associated with the mutator.
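///
/// # Example
///
/// A sketch of binding a mutator to the current thread (not compiled here; `MyVM` and
/// `current_vm_mutator_thread()` are hypothetical binding-side names):
///
/// ```ignore
/// use std::cell::RefCell;
///
/// thread_local! {
///     // Each mutator thread keeps its boxed mutator in thread-local storage.
///     static MUTATOR: RefCell<Option<Box<Mutator<MyVM>>>> = RefCell::new(None);
/// }
///
/// let tls = current_vm_mutator_thread();
/// let mutator = memory_manager::bind_mutator(mmtk, tls);
/// MUTATOR.with(|m| *m.borrow_mut() = Some(mutator));
/// ```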
pub fn bind_mutator<VM: VMBinding>(
    mmtk: &'static MMTK<VM>,
    tls: VMMutatorThread,
) -> Box<Mutator<VM>> {
    let mutator = crate::plan::create_mutator(tls, mmtk);

    const LOG_ALLOCATOR_MAPPING: bool = false;
    if LOG_ALLOCATOR_MAPPING {
        info!("{:?}", mutator.config);
    }
    mutator
}

/// Report to MMTk that a mutator is no longer needed. All mutator state is flushed before it is
/// destroyed. A binding should not attempt to use the mutator after this call. MMTk will not
/// attempt to reclaim the memory for the mutator, so a binding should properly reclaim the memory
/// for the mutator after this call.
///
/// Arguments:
/// * `mutator`: A reference to the mutator to be destroyed.
pub fn destroy_mutator<VM: VMBinding>(mutator: &mut Mutator<VM>) {
    mutator.flush();
    mutator.on_destroy();
}

/// Flush the mutator's local states.
///
/// Arguments:
/// * `mutator`: A reference to the mutator.
pub fn flush_mutator<VM: VMBinding>(mutator: &mut Mutator<VM>) {
    mutator.flush()
}

/// Allocate memory for an object.
///
/// When the allocation is successful, it returns the starting address of the new object.  The
/// memory range for the new object is `size` bytes starting from the returned address, and
/// `RETURNED_ADDRESS + offset` is guaranteed to be aligned to the `align` parameter.  The returned
/// address of a successful allocation will never be zero.
///
/// If MMTk fails to allocate memory, it will attempt a GC to free up some memory and retry the
/// allocation.  After triggering GC, it will call [`crate::vm::Collection::block_for_gc`] to suspend
/// the current thread that is allocating. Callers of `alloc` must be aware of this behavior.
/// For example, JIT compilers that support
/// precise stack scanning need to make the call site of `alloc` a GC-safe point by generating stack maps. See
/// [`alloc_with_options`] if it is undesirable to trigger GC at this allocation site.
///
/// If MMTk has attempted at least one GC, and still cannot free up enough memory, it will call
/// [`crate::vm::Collection::out_of_memory`] to inform the binding. The VM binding
/// can implement that method to handle the out-of-memory event in a VM-specific way, including but
/// not limited to throwing exceptions or errors. If [`crate::vm::Collection::out_of_memory`] returns
/// normally without panicking or throwing exceptions, this function will return zero.
///
/// For performance reasons, a VM should implement the allocation fast-path on their side rather
/// than just calling this function.
///
/// Arguments:
/// * `mutator`: The mutator to perform this allocation request.
/// * `size`: The number of bytes required for the object.
/// * `align`: Required alignment for the object.
/// * `offset`: Offset associated with the alignment.
/// * `semantics`: The allocation semantic required for the allocation.
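///
/// # Example
///
/// A sketch of a slow-path allocation followed by [`post_alloc`] (not compiled here; the
/// size, alignment, and the way the binding derives an `ObjectReference` from the
/// allocation address are illustrative and binding-specific):
///
/// ```ignore
/// let addr = memory_manager::alloc(&mut mutator, 24, 8, 0, AllocationSemantics::Default);
/// // Initialize the VM-side object (e.g. write its header) before constructing the reference.
/// let obj = ObjectReference::from_raw_address(addr).unwrap();
/// memory_manager::post_alloc(&mut mutator, obj, 24, AllocationSemantics::Default);
/// ```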
pub fn alloc<VM: VMBinding>(
    mutator: &mut Mutator<VM>,
    size: usize,
    align: usize,
    offset: usize,
    semantics: AllocationSemantics,
) -> Address {
    #[cfg(debug_assertions)]
    crate::util::alloc::allocator::assert_allocation_args::<VM>(size, align, offset);

    mutator.alloc(size, align, offset, semantics)
}

/// Allocate memory for an object.
///
/// This allocation function allows alterations to the allocation behavior, as specified by the
/// [`crate::util::alloc::AllocationOptions`]. For example, one can allow the allocation to
/// over-commit memory beyond the heap size without triggering a GC. This function can be
/// used in certain cases where the runtime needs a different allocation behavior from
/// what the default [`alloc`] provides.
///
/// Arguments:
/// * `mutator`: The mutator to perform this allocation request.
/// * `size`: The number of bytes required for the object.
/// * `align`: Required alignment for the object.
/// * `offset`: Offset associated with the alignment.
/// * `semantics`: The allocation semantic required for the allocation.
/// * `options`: The allocation options to change the default allocation behavior for this request.
pub fn alloc_with_options<VM: VMBinding>(
    mutator: &mut Mutator<VM>,
    size: usize,
    align: usize,
    offset: usize,
    semantics: AllocationSemantics,
    options: crate::util::alloc::allocator::AllocationOptions,
) -> Address {
    #[cfg(debug_assertions)]
    crate::util::alloc::allocator::assert_allocation_args::<VM>(size, align, offset);

    mutator.alloc_with_options(size, align, offset, semantics, options)
}

/// Invoke the allocation slow path of [`alloc`].
/// Like [`alloc`], this function may trigger GC and call [`crate::vm::Collection::block_for_gc`] or
/// [`crate::vm::Collection::out_of_memory`].  The caller needs to be aware of that.
///
/// *Notes*: This is only intended for use when a binding implements the fast path on
/// the binding side. When the binding handles fast-path allocation and the fast path fails, it can use this
/// method for slow-path allocation. Calling this method before exhausting the fast-path allocation buffer will lead to bad
/// performance.
///
/// Arguments:
/// * `mutator`: The mutator to perform this allocation request.
/// * `size`: The number of bytes required for the object.
/// * `align`: Required alignment for the object.
/// * `offset`: Offset associated with the alignment.
/// * `semantics`: The allocation semantic required for the allocation.
pub fn alloc_slow<VM: VMBinding>(
    mutator: &mut Mutator<VM>,
    size: usize,
    align: usize,
    offset: usize,
    semantics: AllocationSemantics,
) -> Address {
    mutator.alloc_slow(size, align, offset, semantics)
}

/// Invoke the allocation slow path of [`alloc_with_options`].
///
/// Like [`alloc_with_options`], this allocation function allows alterations to the allocation behavior, as specified by the
/// [`crate::util::alloc::AllocationOptions`]. For example, one can allow the allocation to
/// over-commit memory beyond the heap size without triggering a GC. This function can be
/// used in certain cases where the runtime needs a different allocation behavior from
/// what the default [`alloc`] provides.
///
/// Like [`alloc_slow`], this function is also only intended for use when a binding implements the
/// fast path on the binding side.
///
/// Arguments:
/// * `mutator`: The mutator to perform this allocation request.
/// * `size`: The number of bytes required for the object.
/// * `align`: Required alignment for the object.
/// * `offset`: Offset associated with the alignment.
/// * `semantics`: The allocation semantic required for the allocation.
/// * `options`: The allocation options to change the default allocation behavior for this request.
pub fn alloc_slow_with_options<VM: VMBinding>(
    mutator: &mut Mutator<VM>,
    size: usize,
    align: usize,
    offset: usize,
    semantics: AllocationSemantics,
    options: AllocationOptions,
) -> Address {
    mutator.alloc_slow_with_options(size, align, offset, semantics, options)
}

/// Perform post-allocation actions, usually initializing object metadata. For many allocators no actions are
/// required. For performance reasons, a VM should implement the post-alloc fast-path on their side
/// rather than just calling this function.
///
/// Arguments:
/// * `mutator`: The mutator to perform post-alloc actions.
/// * `refer`: The newly allocated object.
/// * `bytes`: The size of the space allocated for the object (in bytes).
/// * `semantics`: The allocation semantics used for the allocation.
pub fn post_alloc<VM: VMBinding>(
    mutator: &mut Mutator<VM>,
    refer: ObjectReference,
    bytes: usize,
    semantics: AllocationSemantics,
) {
    mutator.post_alloc(refer, bytes, semantics);
}

/// The *subsuming* write barrier by MMTk. For performance reasons, a VM should implement the write barrier
/// fast-path on their side rather than just calling this function.
///
/// For a correct barrier implementation, a VM binding needs to choose one of the following options:
/// * Use the subsuming barrier `object_reference_write`.
/// * Use both `object_reference_write_pre` and `object_reference_write_post` if the binding has difficulty delegating the store to mmtk-core with the subsuming barrier.
/// * Implement the fast-path on the VM side, and call the generic API `object_reference_write_slow` as the barrier slow-path call.
/// * Implement the fast-path on the VM side, and do a specialized slow-path call.
///
/// Arguments:
/// * `mutator`: The mutator for the current thread.
/// * `src`: The modified source object.
/// * `slot`: The location of the field to be modified.
/// * `target`: The target for the write operation.
///
/// # Deprecated
///
/// This function needs to be redesigned.  Its current form has multiple issues.
///
/// -   It is only able to write non-null object references into the slot.  But dynamic language
///     VMs may write non-reference values, such as tagged small integers, or special values such as
///     `null`, `undefined`, `true`, `false`, etc., into a field that previously contained an object
///     reference.
/// -   It relies on `slot.store` to write `target` into the slot, but `slot.store` is designed for
///     forwarding references when an object is moved by GC, and is supposed to preserve tagged
///     type information, the offset (if it is an interior pointer), etc.  A write barrier is
///     associated with an assignment operation, which usually updates such information instead.
///
/// We will redesign a more general subsuming write barrier to address those problems and replace
/// the current `object_reference_write`.  Before that happens, VM bindings should use
/// `object_reference_write_pre` and `object_reference_write_post` instead.
#[deprecated = "Use `object_reference_write_pre` and `object_reference_write_post` instead, until this function is redesigned"]
pub fn object_reference_write<VM: VMBinding>(
    mutator: &mut Mutator<VM>,
    src: ObjectReference,
    slot: VM::VMSlot,
    target: ObjectReference,
) {
    mutator.barrier().object_reference_write(src, slot, target);
}

/// The write barrier by MMTk. This is a *pre* write barrier, which we expect a binding to call
/// *before* it modifies an object. For performance reasons, a VM should implement the write barrier
/// fast-path on their side rather than just calling this function.
///
/// For a correct barrier implementation, a VM binding needs to choose one of the following options:
/// * Use the subsuming barrier `object_reference_write`.
/// * Use both `object_reference_write_pre` and `object_reference_write_post` if the binding has difficulty delegating the store to mmtk-core with the subsuming barrier.
/// * Implement the fast-path on the VM side, and call the generic API `object_reference_write_slow` as the barrier slow-path call.
/// * Implement the fast-path on the VM side, and do a specialized slow-path call.
///
/// Arguments:
/// * `mutator`: The mutator for the current thread.
/// * `src`: The modified source object.
/// * `slot`: The location of the field to be modified.
/// * `target`: The target for the write operation.  `None` if the slot did not hold an object
///   reference before the write operation.  For example, the slot may be holding a `null`
///   reference, a small integer, or special values such as `true`, `false`, `undefined`, etc.
pub fn object_reference_write_pre<VM: VMBinding>(
    mutator: &mut Mutator<VM>,
    src: ObjectReference,
    slot: VM::VMSlot,
    target: Option<ObjectReference>,
) {
    mutator
        .barrier()
        .object_reference_write_pre(src, slot, target);
}

/// The write barrier by MMTk. This is a *post* write barrier, which we expect a binding to call
/// *after* it modifies an object. For performance reasons, a VM should implement the write barrier
/// fast-path on their side rather than just calling this function.
///
/// For a correct barrier implementation, a VM binding needs to choose one of the following options:
/// * Use the subsuming barrier `object_reference_write`.
/// * Use both `object_reference_write_pre` and `object_reference_write_post` if the binding has difficulty delegating the store to mmtk-core with the subsuming barrier.
/// * Implement the fast-path on the VM side, and call the generic API `object_reference_write_slow` as the barrier slow-path call.
/// * Implement the fast-path on the VM side, and do a specialized slow-path call.
///
/// Arguments:
/// * `mutator`: The mutator for the current thread.
/// * `src`: The modified source object.
/// * `slot`: The location of the field to be modified.
/// * `target`: The target for the write operation.  `None` if the slot no longer holds an object
///   reference after the write operation.  This may happen when writing a `null` reference, a small
///   integer, or a special value such as `true`, `false`, `undefined`, etc., into the slot.
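///
/// # Example
///
/// A sketch of a binding-side reference store wrapped with the pre/post barriers (not
/// compiled here; `slot_load` and `slot_store` stand in for hypothetical binding code
/// that reads and writes the field):
///
/// ```ignore
/// let old: Option<ObjectReference> = slot_load(slot);
/// memory_manager::object_reference_write_pre(&mut mutator, src, slot, old);
/// slot_store(slot, target); // the actual field assignment, done by the binding
/// memory_manager::object_reference_write_post(&mut mutator, src, slot, Some(target));
/// ```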
pub fn object_reference_write_post<VM: VMBinding>(
    mutator: &mut Mutator<VM>,
    src: ObjectReference,
    slot: VM::VMSlot,
    target: Option<ObjectReference>,
) {
    mutator
        .barrier()
        .object_reference_write_post(src, slot, target);
}

/// The *subsuming* memory region copy barrier by MMTk.
/// This is called when the VM tries to copy a piece of heap memory to another location.
/// The data within the slice does not necessarily have to be all valid pointers,
/// but the VM binding should be able to filter out non-reference values on slot iteration.
///
/// For VMs that perform a heap memory copy operation, for example OpenJDK's array copy operation, the binding needs to
/// call the `memory_region_copy*` APIs. As with `object_reference_write*`, the binding can choose either the subsuming barrier,
/// or the pre/post barriers.
///
/// Arguments:
/// * `mutator`: The mutator for the current thread.
/// * `src`: Source memory slice to copy from.
/// * `dst`: Destination memory slice to copy to.
///
/// The sizes of `src` and `dst` should be equal.
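///
/// # Example
///
/// A sketch of an array copy guarded by the subsuming barrier (not compiled here;
/// `slice_of` stands in for hypothetical binding code that constructs a `VMMemorySlice`
/// over an array range):
///
/// ```ignore
/// let src = slice_of(src_array, 0, len);
/// let dst = slice_of(dst_array, 0, len);
/// // The subsuming barrier performs the copy on behalf of the VM.
/// memory_manager::memory_region_copy(&mut mutator, src, dst);
/// ```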
pub fn memory_region_copy<VM: VMBinding>(
    mutator: &'static mut Mutator<VM>,
    src: VM::VMMemorySlice,
    dst: VM::VMMemorySlice,
) {
    debug_assert_eq!(src.bytes(), dst.bytes());
    mutator.barrier().memory_region_copy(src, dst);
}

/// The *generic* memory region copy *pre* barrier by MMTk, which we expect a binding to call
/// *before* it performs the memory copy.
/// This is called when the VM tries to copy a piece of heap memory to another location.
/// The data within the slice does not necessarily have to be all valid pointers,
/// but the VM binding should be able to filter out non-reference values on slot iteration.
///
/// For VMs that perform a heap memory copy operation, for example OpenJDK's array copy operation, the binding needs to
/// call the `memory_region_copy*` APIs. As with `object_reference_write*`, the binding can choose either the subsuming barrier,
/// or the pre/post barriers.
///
/// Arguments:
/// * `mutator`: The mutator for the current thread.
/// * `src`: Source memory slice to copy from.
/// * `dst`: Destination memory slice to copy to.
///
/// The sizes of `src` and `dst` should be equal.
pub fn memory_region_copy_pre<VM: VMBinding>(
    mutator: &'static mut Mutator<VM>,
    src: VM::VMMemorySlice,
    dst: VM::VMMemorySlice,
) {
    debug_assert_eq!(src.bytes(), dst.bytes());
    mutator.barrier().memory_region_copy_pre(src, dst);
}

/// The *generic* memory region copy *post* barrier by MMTk, which we expect a binding to call
/// *after* it performs the memory copy.
/// This is called when the VM tries to copy a piece of heap memory to another location.
/// The data within the slice does not necessarily have to be all valid pointers,
/// but the VM binding should be able to filter out non-reference values on slot iteration.
///
/// For VMs that perform a heap memory copy operation, for example OpenJDK's array copy operation, the binding needs to
/// call the `memory_region_copy*` APIs. As with `object_reference_write*`, the binding can choose either the subsuming barrier,
/// or the pre/post barriers.
///
/// Arguments:
/// * `mutator`: The mutator for the current thread.
/// * `src`: Source memory slice to copy from.
/// * `dst`: Destination memory slice to copy to.
///
/// The sizes of `src` and `dst` should be equal.
pub fn memory_region_copy_post<VM: VMBinding>(
    mutator: &'static mut Mutator<VM>,
    src: VM::VMMemorySlice,
    dst: VM::VMMemorySlice,
) {
    debug_assert_eq!(src.bytes(), dst.bytes());
    mutator.barrier().memory_region_copy_post(src, dst);
}

/// Return an AllocatorSelector for the given allocation semantic. This method is provided
/// so that VM compilers may call it to help generate the allocation fast-path.
///
/// Arguments:
/// * `mmtk`: The reference to an MMTk instance.
/// * `semantics`: The allocation semantic to query.
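///
/// # Example
///
/// A sketch of a JIT querying the selector for the default semantics to decide which
/// fast path to emit (the match arms shown are illustrative):
///
/// ```ignore
/// match memory_manager::get_allocator_mapping(mmtk, AllocationSemantics::Default) {
///     AllocatorSelector::BumpPointer(_) => { /* emit a bump-pointer fast path */ }
///     _ => { /* fall back to calling memory_manager::alloc() */ }
/// }
/// ```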
pub fn get_allocator_mapping<VM: VMBinding>(
    mmtk: &MMTK<VM>,
    semantics: AllocationSemantics,
) -> AllocatorSelector {
    mmtk.get_plan().get_allocator_mapping()[semantics]
}

/// The standard malloc. MMTk either uses its own allocator, or forwards the call to a
/// library malloc.
pub fn malloc(size: usize) -> Address {
    crate::util::malloc::malloc(size)
}

/// The standard malloc, except that with the feature `malloc_counted_size`, MMTk will count the allocated memory towards its heap size.
/// Thus the method requires a reference to an MMTk instance. MMTk either uses its own allocator, or forwards the call to a
/// library malloc.
#[cfg(feature = "malloc_counted_size")]
pub fn counted_malloc<VM: VMBinding>(mmtk: &MMTK<VM>, size: usize) -> Address {
    crate::util::malloc::counted_malloc(mmtk, size)
}

/// The standard calloc.
pub fn calloc(num: usize, size: usize) -> Address {
    crate::util::malloc::calloc(num, size)
}

/// The standard calloc, except that with the feature `malloc_counted_size`, MMTk will count the allocated memory towards its heap size.
/// Thus the method requires a reference to an MMTk instance.
#[cfg(feature = "malloc_counted_size")]
pub fn counted_calloc<VM: VMBinding>(mmtk: &MMTK<VM>, num: usize, size: usize) -> Address {
    crate::util::malloc::counted_calloc(mmtk, num, size)
}

/// The standard realloc.
pub fn realloc(addr: Address, size: usize) -> Address {
    crate::util::malloc::realloc(addr, size)
}

/// The standard realloc, except that with the feature `malloc_counted_size`, MMTk will count the allocated memory towards its heap size.
/// Thus the method requires a reference to an MMTk instance, and the size of the existing memory that will be reallocated.
/// The `addr` in the arguments must be an address that was earlier returned from MMTk's `malloc()`, `calloc()` or `realloc()`.
#[cfg(feature = "malloc_counted_size")]
pub fn realloc_with_old_size<VM: VMBinding>(
    mmtk: &MMTK<VM>,
    addr: Address,
    size: usize,
    old_size: usize,
) -> Address {
    crate::util::malloc::realloc_with_old_size(mmtk, addr, size, old_size)
}

/// The standard free.
/// The `addr` in the arguments must be an address that was earlier returned from MMTk's `malloc()`, `calloc()` or `realloc()`.
pub fn free(addr: Address) {
    crate::util::malloc::free(addr)
}

/// The standard free, except that with the feature `malloc_counted_size`, MMTk will count the freed memory against its heap size.
/// Thus the method requires a reference to an MMTk instance, and the size of the memory to free.
/// The `addr` in the arguments must be an address that was earlier returned from MMTk's `malloc()`, `calloc()` or `realloc()`.
#[cfg(feature = "malloc_counted_size")]
pub fn free_with_size<VM: VMBinding>(mmtk: &MMTK<VM>, addr: Address, old_size: usize) {
    crate::util::malloc::free_with_size(mmtk, addr, old_size)
}

/// Get the currently active malloc'd bytes. Here MMTk only accounts for bytes allocated through those 'counted malloc' functions.
#[cfg(feature = "malloc_counted_size")]
pub fn get_malloc_bytes<VM: VMBinding>(mmtk: &MMTK<VM>) -> usize {
    use std::sync::atomic::Ordering;
    mmtk.state.malloc_bytes.load(Ordering::SeqCst)
}

/// Poll for GC. MMTk will decide if a GC is needed. If so, this call will block
/// the current thread and trigger a GC. Otherwise, it will simply return.
/// Usually a binding does not need to call this function. MMTk will poll for GC during allocation.
/// However, if a binding uses counted malloc (which does not poll for GC), it may want to poll for GC manually.
/// This function should only be used by mutator threads.
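///
/// # Example
///
/// A sketch of pairing a counted malloc with a manual GC poll (not compiled here; `tls`
/// is the current mutator thread):
///
/// ```ignore
/// let addr = memory_manager::counted_malloc(mmtk, bytes);
/// // Counted malloc does not poll for GC, so poll manually.
/// memory_manager::gc_poll(mmtk, tls);
/// ```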
pub fn gc_poll<VM: VMBinding>(mmtk: &MMTK<VM>, tls: VMMutatorThread) {
    use crate::vm::{ActivePlan, Collection};
    debug_assert!(
        VM::VMActivePlan::is_mutator(tls.0),
        "gc_poll() can only be called by a mutator thread."
    );

    if mmtk.gc_trigger.poll(false, None) {
        debug!("Collection required");
        if !mmtk.state.is_initialized() {
            panic!("GC is not allowed here: collection is not initialized (did you call initialize_collection()?).");
        }
        VM::VMCollection::block_for_gc(tls);
    }
}

/// Wrapper for [`crate::scheduler::GCWorker::run`].
pub fn start_worker<VM: VMBinding>(
    mmtk: &'static MMTK<VM>,
    tls: VMWorkerThread,
    worker: Box<GCWorker<VM>>,
) {
    worker.run(tls, mmtk);
}

/// Wrapper for [`crate::mmtk::MMTK::initialize_collection`].
pub fn initialize_collection<VM: VMBinding>(mmtk: &'static MMTK<VM>, tls: VMThread) {
    mmtk.initialize_collection(tls);
}

/// Process an MMTk run-time option. Returns true if the option is processed successfully.
///
/// Arguments:
/// * `builder`: A reference to the MMTk builder.
/// * `name`: The name of the option.
/// * `value`: The value of the option (as a string).
pub fn process(builder: &mut MMTKBuilder, name: &str, value: &str) -> bool {
    builder.set_option(name, value)
}

/// Process multiple MMTk run-time options. Returns true if all the options are processed successfully.
///
/// Arguments:
/// * `builder`: A reference to the MMTk builder.
/// * `options`: A string of key-value pairs separated by white spaces, e.g. "threads=1 stress_factor=4096".
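///
/// # Example
///
/// A sketch of bulk option processing during VM startup (the option string reuses the
/// example above):
///
/// ```ignore
/// let mut builder = MMTKBuilder::new();
/// assert!(memory_manager::process_bulk(&mut builder, "threads=1 stress_factor=4096"));
/// ```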
pub fn process_bulk(builder: &mut MMTKBuilder, options: &str) -> bool {
    builder.set_options_bulk_by_str(options)
}

/// Return used memory in bytes. MMTk accounts for memory in pages, thus this method always returns a value at
/// page granularity.
///
/// Arguments:
/// * `mmtk`: A reference to an MMTk instance.
pub fn used_bytes<VM: VMBinding>(mmtk: &MMTK<VM>) -> usize {
    mmtk.get_plan().get_used_pages() << LOG_BYTES_IN_PAGE
}

/// Return free memory in bytes. MMTk accounts for memory in pages, thus this method always returns a value at
/// page granularity.
///
/// Arguments:
/// * `mmtk`: A reference to an MMTk instance.
pub fn free_bytes<VM: VMBinding>(mmtk: &MMTK<VM>) -> usize {
    mmtk.get_plan().get_free_pages() << LOG_BYTES_IN_PAGE
}

/// Return a hash map of live bytes statistics in the last GC for each space.
///
/// MMTk usually accounts for memory in pages by each space.
/// This is a special method: we count the size of every live object in a GC and sum up the total bytes.
/// We provide this method so users can use [`crate::LiveBytesStats`] to know if
/// a space is fragmented.
/// The value returned by this method is only updated when we finish tracing in a GC. A recommended time
/// to call this method is at the end of a GC (e.g. when the runtime is about to resume threads).
pub fn live_bytes_in_last_gc<VM: VMBinding>(
    mmtk: &MMTK<VM>,
) -> HashMap<&'static str, crate::LiveBytesStats> {
    mmtk.state.live_bytes_in_last_gc.borrow().clone()
}

/// Return the starting address of the heap. *Note that currently MMTk uses
/// a fixed address range as the heap.*
pub fn starting_heap_address() -> Address {
    vm_layout().heap_start
}

/// Return the ending address of the heap. *Note that currently MMTk uses
/// a fixed address range as the heap.*
pub fn last_heap_address() -> Address {
    vm_layout().heap_end
}

/// Return the total memory in bytes.
///
/// Arguments:
/// * `mmtk`: A reference to an MMTk instance.
pub fn total_bytes<VM: VMBinding>(mmtk: &MMTK<VM>) -> usize {
    mmtk.get_plan().get_total_pages() << LOG_BYTES_IN_PAGE
}

/// The application code has requested a collection. This is just a GC hint, and
/// we may ignore it.
///
/// Returns whether a GC ran. If MMTk triggers a GC, this method will block the
/// calling thread and return true when the GC finishes. Otherwise, this method returns
/// false immediately.
///
/// Arguments:
/// * `mmtk`: A reference to an MMTk instance.
/// * `tls`: The thread that triggers this collection request.
pub fn handle_user_collection_request<VM: VMBinding>(
    mmtk: &MMTK<VM>,
    tls: VMMutatorThread,
) -> bool {
    mmtk.handle_user_collection_request(tls, false, false)
}

/// Is the object alive?
///
/// Arguments:
/// * `object`: The object reference to query.
pub fn is_live_object(object: ObjectReference) -> bool {
    object.is_live()
}

/// Check if `addr` is the raw address of an object reference to an MMTk object.
///
/// Concretely:
/// 1.  Return `Some(object)` if `ObjectReference::from_raw_address(addr)` is a valid object
///     reference to an object in any space in MMTk. `object` is the result of
///     `ObjectReference::from_raw_address(addr)`.
/// 2.  Return `None` otherwise.
///
/// This function is useful for conservative root scanning.  The VM can iterate through all words in
/// a stack, filter out zeros, misaligned words, obviously out-of-range words (such as addresses
/// greater than `0x0000_7fff_ffff_ffff` on Linux on x86_64), and use this function to decide if a
/// word is really a reference.
///
/// This function does not handle internal pointers. If a binding may have internal pointers on
/// the stack, and requires identifying the base reference for an internal pointer, it should use
/// [`find_object_from_internal_pointer`] instead.
///
/// Note: This function has special behaviors if the VM space (enabled by the `vm_space` feature)
/// is present.  See `crate::plan::global::BasePlan::vm_space`.
///
/// Argument:
/// * `addr`: A non-zero word-aligned address.  Because the raw address of an `ObjectReference`
///   cannot be zero and must be word-aligned, the caller must filter out zero and misaligned
///   addresses before calling this function.  Otherwise the behavior is undefined.
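///
/// # Example
///
/// A sketch of conservative stack scanning (not compiled here; `stack_words` stands in
/// for hypothetical binding code that iterates the words of a suspended stack):
///
/// ```ignore
/// for word in stack_words(thread) {
///     // The caller must filter out zero and misaligned words first.
///     if word.is_zero() || !word.is_aligned_to(std::mem::size_of::<usize>()) {
///         continue;
///     }
///     if let Some(object) = memory_manager::is_mmtk_object(word) {
///         roots.push(object);
///     }
/// }
/// ```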
#[cfg(feature = "is_mmtk_object")]
pub fn is_mmtk_object(addr: Address) -> Option<ObjectReference> {
    crate::util::is_mmtk_object::check_object_reference(addr)
}

/// Find if there is an object with the VO bit set for the given address range.
/// This should be used instead of [`crate::memory_manager::is_mmtk_object`] for conservative stack scanning if
/// the binding may have internal pointers on the stack.
///
/// Note that we only consider pointers that point to addresses that are equal to or greater than
/// the raw address of the object's `ObjectReference`, and within the allocation, as 'internal
/// pointers'. To be precise, for each object ref `obj_ref`, internal pointers are in the range
/// `[obj_ref.to_raw_address(), obj_ref.to_object_start() +
/// ObjectModel::get_current_size(obj_ref))`. If a binding defines internal pointers differently,
/// calling this method is undefined behavior. If this is the case for you, please submit an issue
/// or engage us on Zulip to discuss more.
///
/// Note that, in a similar situation to [`crate::memory_manager::is_mmtk_object`], the binding should filter
/// out obvious non-pointers (e.g. by alignment checks, bounds checks, etc.) before calling this function to avoid unnecessary
/// cost. This method is not cheap.
///
/// To minimize the cost, the user should also use a small `max_search_bytes`.
///
/// Note: This function has special behaviors if the VM space (enabled by the `vm_space` feature)
/// is present.  See `crate::plan::global::BasePlan::vm_space`.
///
/// Argument:
/// * `internal_ptr`: The address to start searching. We search backwards from this address (including this address) to find the base reference.
/// * `max_search_bytes`: The maximum number of bytes we may search for an object with the VO bit set. `internal_ptr - max_search_bytes` is not included.
#[cfg(feature = "is_mmtk_object")]
pub fn find_object_from_internal_pointer(
    internal_ptr: Address,
    max_search_bytes: usize,
) -> Option<ObjectReference> {
    crate::util::is_mmtk_object::check_internal_reference(internal_ptr, max_search_bytes)
}

/// Return true if the `object` lies in a region of memory where
/// -   only MMTk can allocate into, or
/// -   only MMTk's delegated memory allocator (such as a malloc implementation) can allocate into
///     for allocation requests from MMTk.
///
/// Return false otherwise.  This function never panics.
///
/// Particularly, if this function returns true, `object` cannot be an object allocated by the VM
/// itself.
///
/// If this function returns true, the object cannot be allocated by the `malloc` function called by
/// the VM, either. In other words, if the `MallocSpace` of MMTk called `malloc` to allocate the
/// object for the VM in response to `memory_manager::alloc`, this function will return true; but
/// if the VM directly called `malloc` to allocate the object, this function will return false.
///
/// If `is_mmtk_object(object.to_raw_address())` returns true, `is_in_mmtk_spaces(object)` must also
/// return true.
///
/// This function is useful if an object reference in the VM can be either a pointer into the MMTk
/// heap, or a pointer to non-MMTk objects.  If the VM has a pre-built boot image that contains
/// primordial objects, or if the VM has its own allocator or uses any third-party allocators, or
/// if the VM allows an object reference to point to native objects such as C++ objects, this
/// function can distinguish between MMTk-allocated objects and other objects.
///
/// Note: This function has special behaviors if the VM space (enabled by the `vm_space` feature)
/// is present.  See `crate::plan::global::BasePlan::vm_space`.
///
/// Arguments:
/// * `object`: The object reference to query.
pub fn is_in_mmtk_spaces(object: ObjectReference) -> bool {
    use crate::mmtk::SFT_MAP;
    SFT_MAP
        .get_checked(object.to_raw_address())
        .is_in_space(object)
}

/// Is the address in mapped memory? The runtime can use this function to check
/// if an address is mapped by MMTk. Note that this is different from `is_in_mmtk_spaces()`.
/// For malloc spaces, MMTk does not map those addresses (malloc does the mmap), so
/// this function will return false, but `is_in_mmtk_spaces()` will return true if the address
/// is actually a valid object in a malloc space. To check if an object is in our heap,
/// the runtime should always use `is_in_mmtk_spaces()`. This function, `is_mapped_address()`,
/// may be removed at some point.
///
/// Arguments:
/// * `address`: The address to query.
// TODO: Do we really need this function? Can a runtime always use is_mapped_object()?
pub fn is_mapped_address(address: Address) -> bool {
    address.is_mapped()
}

/// Add a reference to the list of weak references. A binding may
/// call this either when a weak reference is created, or when a weak reference is traced during GC.
///
/// Arguments:
/// * `mmtk`: A reference to an MMTk instance.
/// * `reff`: The weak reference to add.
pub fn add_weak_candidate<VM: VMBinding>(mmtk: &MMTK<VM>, reff: ObjectReference) {
    mmtk.reference_processors.add_weak_candidate(reff);
}

/// Add a reference to the list of soft references. A binding may
/// call this either when a soft reference is created, or when a soft reference is traced during GC.
///
/// Arguments:
/// * `mmtk`: A reference to an MMTk instance.
/// * `reff`: The soft reference to add.
pub fn add_soft_candidate<VM: VMBinding>(mmtk: &MMTK<VM>, reff: ObjectReference) {
    mmtk.reference_processors.add_soft_candidate(reff);
}

/// Add a reference to the list of phantom references. A binding may
/// call this either when a phantom reference is created, or when a phantom reference is traced during GC.
///
/// Arguments:
/// * `mmtk`: A reference to an MMTk instance.
/// * `reff`: The phantom reference to add.
pub fn add_phantom_candidate<VM: VMBinding>(mmtk: &MMTK<VM>, reff: ObjectReference) {
    mmtk.reference_processors.add_phantom_candidate(reff);
}

/// Generic hook to allow benchmarks to be harnessed. We do a full heap
/// GC, and then start recording statistics for MMTk.
///
/// Arguments:
/// * `mmtk`: A reference to an MMTk instance.
/// * `tls`: The thread that calls the function (and triggers a collection).
pub fn harness_begin<VM: VMBinding>(mmtk: &MMTK<VM>, tls: VMMutatorThread) {
    mmtk.harness_begin(tls);
}

/// Generic hook to allow benchmarks to be harnessed. We stop collecting
/// statistics, and print the statistics values.
///
/// Arguments:
/// * `mmtk`: A reference to an MMTk instance.
pub fn harness_end<VM: VMBinding>(mmtk: &'static MMTK<VM>) {
    mmtk.harness_end();
}

/// Register a finalizable object. MMTk will retain the liveness of
/// the object even if it is not reachable from the program.
/// Note that finalization upon exit is not supported.
///
/// Arguments:
/// * `mmtk`: A reference to an MMTk instance
/// * `object`: The object that has a finalizer
pub fn add_finalizer<VM: VMBinding>(
    mmtk: &'static MMTK<VM>,
    object: <VM::VMReferenceGlue as ReferenceGlue<VM>>::FinalizableType,
) {
    if *mmtk.options.no_finalizer {
        warn!("add_finalizer() is called when no_finalizer = true");
    }

    mmtk.finalizable_processor.lock().unwrap().add(object);
}

/// Pin an object. MMTk will make sure that the object does not move
/// during GC. Note that this action is not supported by some plans, e.g., semispace.
/// It returns true if the pinning operation has been performed, i.e.,
/// the object status changed from non-pinned to pinned.
///
/// Arguments:
/// * `object`: The object to be pinned
#[cfg(feature = "object_pinning")]
pub fn pin_object(object: ObjectReference) -> bool {
    use crate::mmtk::SFT_MAP;
    SFT_MAP
        .get_checked(object.to_raw_address())
        .pin_object(object)
}

/// Unpin an object.
/// Returns true if the unpinning operation has been performed, i.e.,
/// the object status changed from pinned to non-pinned.
///
/// Arguments:
/// * `object`: The object to be unpinned
#[cfg(feature = "object_pinning")]
pub fn unpin_object(object: ObjectReference) -> bool {
    use crate::mmtk::SFT_MAP;
    SFT_MAP
        .get_checked(object.to_raw_address())
        .unpin_object(object)
}

/// Check whether an object is currently pinned.
///
/// Arguments:
/// * `object`: The object to be checked
#[cfg(feature = "object_pinning")]
pub fn is_pinned(object: ObjectReference) -> bool {
    use crate::mmtk::SFT_MAP;
    SFT_MAP
        .get_checked(object.to_raw_address())
        .is_object_pinned(object)
}

/// Get an object that is ready for finalization. After each GC, if any registered object is no longer
/// alive, this call will return one of those objects. MMTk will retain the liveness of those objects
/// until they are popped through this call. Once an object is popped, it is the responsibility of
/// the VM to make sure it is properly finalized before being reclaimed by the GC. This call is non-blocking,
/// and will return None if no object is ready for finalization.
///
/// Arguments:
/// * `mmtk`: A reference to an MMTk instance.
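///
/// # Example
///
/// A sketch of a finalizer thread draining ready objects after a GC (not compiled here;
/// `run_finalizer` stands in for hypothetical binding code that executes the finalizer):
///
/// ```ignore
/// while let Some(finalizable) = memory_manager::get_finalized_object(mmtk) {
///     run_finalizer(finalizable);
/// }
/// ```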
pub fn get_finalized_object<VM: VMBinding>(
    mmtk: &'static MMTK<VM>,
) -> Option<<VM::VMReferenceGlue as ReferenceGlue<VM>>::FinalizableType> {
    if *mmtk.options.no_finalizer {
        warn!("get_finalized_object() is called when no_finalizer = true");
    }

    mmtk.finalizable_processor
        .lock()
        .unwrap()
        .get_ready_object()
}

/// Pop all the finalizers that were registered for finalization. The returned objects may or may not be ready for
/// finalization. After this call, MMTk's finalizer processor will no longer have any registered finalizers.
///
/// This is useful for some VMs which require all finalizable objects to be finalized on exit.
///
/// Arguments:
/// * `mmtk`: A reference to an MMTk instance.
pub fn get_all_finalizers<VM: VMBinding>(
    mmtk: &'static MMTK<VM>,
) -> Vec<<VM::VMReferenceGlue as ReferenceGlue<VM>>::FinalizableType> {
    if *mmtk.options.no_finalizer {
        warn!("get_all_finalizers() is called when no_finalizer = true");
    }

    mmtk.finalizable_processor
        .lock()
        .unwrap()
        .get_all_finalizers()
}

/// Pop the finalizers that were registered and associated with a certain object. The returned objects may or may not be ready for finalization.
/// This is useful for some VMs that may manually execute the finalize method for an object.
///
/// Arguments:
/// * `mmtk`: A reference to an MMTk instance.
/// * `object`: The given object whose finalizers MMTk will pop.
pub fn get_finalizers_for<VM: VMBinding>(
    mmtk: &'static MMTK<VM>,
    object: ObjectReference,
) -> Vec<<VM::VMReferenceGlue as ReferenceGlue<VM>>::FinalizableType> {
    if *mmtk.options.no_finalizer {
        warn!("get_finalizers() is called when no_finalizer = true");
    }

    mmtk.finalizable_processor
        .lock()
        .unwrap()
        .get_finalizers_for(object)
}

/// Get the number of workers. MMTk spawns worker threads for the 'threads' defined in the options.
/// So the number of workers is derived from the threads option. Note that the feature single_worker overrides
/// the threads option and forces a single worker thread.
///
/// Arguments:
/// * `mmtk`: A reference to an MMTk instance.
pub fn num_of_workers<VM: VMBinding>(mmtk: &'static MMTK<VM>) -> usize {
    mmtk.scheduler.num_workers()
}

/// Add a work packet to the given work bucket. Note that this simply adds the work packet to the given
/// work bucket, and the scheduler will decide when to execute the work packet.
///
/// Arguments:
/// * `mmtk`: A reference to an MMTk instance.
/// * `bucket`: Which work bucket to add this packet to.
/// * `packet`: The work packet to be added.
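///
/// # Example
///
/// A sketch of scheduling a custom work packet (not compiled here; `MyPacket` is a
/// hypothetical type implementing [`crate::scheduler::GCWork`]):
///
/// ```ignore
/// memory_manager::add_work_packet(mmtk, WorkBucketStage::Closure, MyPacket::new());
/// ```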
pub fn add_work_packet<VM: VMBinding, W: GCWork<VM>>(
    mmtk: &'static MMTK<VM>,
    bucket: WorkBucketStage,
    packet: W,
) {
    mmtk.scheduler.work_buckets[bucket].add(packet)
}

/// Bulk add a number of work packets to the given work bucket. Note that this simply adds the work packets
/// to the given work bucket, and the scheduler will decide when to execute the work packets.
///
/// Arguments:
/// * `mmtk`: A reference to an MMTk instance.
/// * `bucket`: Which work bucket to add these packets to.
/// * `packets`: The work packets to be added.
pub fn add_work_packets<VM: VMBinding>(
    mmtk: &'static MMTK<VM>,
    bucket: WorkBucketStage,
    packets: Vec<Box<dyn GCWork<VM>>>,
) {
    mmtk.scheduler.work_buckets[bucket].bulk_add(packets)
}