// mmtk/util/alloc/large_object_allocator.rs
use std::sync::Arc;

use crate::policy::largeobjectspace::LargeObjectSpace;
use crate::policy::space::Space;
use crate::util::alloc::{allocator, Allocator};
use crate::util::opaque_pointer::*;
use crate::util::Address;
use crate::vm::VMBinding;

use super::allocator::AllocatorContext;
12#[repr(C)]
15pub struct LargeObjectAllocator<VM: VMBinding> {
16 pub tls: VMThread,
18 space: &'static LargeObjectSpace<VM>,
20 context: Arc<AllocatorContext<VM>>,
21}
22
23impl<VM: VMBinding> Allocator<VM> for LargeObjectAllocator<VM> {
24 fn get_tls(&self) -> VMThread {
25 self.tls
26 }
27
28 fn get_context(&self) -> &AllocatorContext<VM> {
29 &self.context
30 }
31
32 fn get_space(&self) -> &'static dyn Space<VM> {
33 self.space as &'static dyn Space<VM>
35 }
36
37 fn does_thread_local_allocation(&self) -> bool {
38 false
39 }
40
41 fn alloc(&mut self, size: usize, align: usize, offset: usize) -> Address {
42 let cell: Address = self.alloc_slow(size, align, offset);
43 if !cell.is_zero() {
45 allocator::align_allocation::<VM>(cell, align, offset)
46 } else {
47 cell
48 }
49 }
50
51 fn alloc_slow_once(&mut self, size: usize, align: usize, _offset: usize) -> Address {
52 if self.handle_obvious_oom_request(self.tls, size) {
53 return Address::ZERO;
54 }
55
56 let maxbytes = allocator::get_maximum_aligned_size::<VM>(size, align);
57 let pages = crate::util::conversions::bytes_to_pages_up(maxbytes);
58 self.space
59 .allocate_pages(self.tls, pages, self.get_context().get_alloc_options())
60 }
61}
62
63impl<VM: VMBinding> LargeObjectAllocator<VM> {
64 pub(crate) fn new(
65 tls: VMThread,
66 space: &'static LargeObjectSpace<VM>,
67 context: Arc<AllocatorContext<VM>>,
68 ) -> Self {
69 LargeObjectAllocator {
70 tls,
71 space,
72 context,
73 }
74 }
75}