use std::sync::Arc;

use crate::policy::marksweepspace::native_ms::*;
use crate::util::alloc::allocator;
use crate::util::alloc::Allocator;
use crate::util::linear_scan::Region;
use crate::util::Address;
use crate::util::VMThread;
use crate::vm::VMBinding;

use super::allocator::AllocatorContext;

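/// A thread-local free-list allocator for the native mark-sweep space, with
/// size-segregated block lists in the style of MiMalloc.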
#[repr(C)]
pub struct FreeListAllocator<VM: VMBinding> {
    pub tls: VMThread,
    space: &'static MarkSweepSpace<VM>,
    context: Arc<AllocatorContext<VM>>,
    /// Blocks with free cells, grouped by size bin. The fast path allocates from these.
    pub available_blocks: BlockLists,
    /// Blocks with free cells, used instead of `available_blocks` under precise
    /// stress testing so that every allocation takes the slow path.
    pub available_blocks_stress: BlockLists,
    /// Blocks that have been marked but not yet swept.
    pub unswept_blocks: BlockLists,
    /// Blocks with no free cells.
    pub consumed_blocks: BlockLists,
}

impl<VM: VMBinding> Allocator<VM> for FreeListAllocator<VM> {
    fn get_tls(&self) -> VMThread {
        self.tls
    }

    fn get_space(&self) -> &'static dyn crate::policy::space::Space<VM> {
        self.space
    }

    fn get_context(&self) -> &AllocatorContext<VM> {
        &self.context
    }

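    /// Fast-path allocation: pop a cell from a thread-local block with free
    /// cells, falling back to `alloc_slow` if none is available.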
    fn alloc(&mut self, size: usize, align: usize, offset: usize) -> Address {
        debug_assert!(
            size <= MAX_BIN_SIZE,
            "Alloc request for {} bytes is too big.",
            size
        );
        debug_assert!(align <= VM::MAX_ALIGNMENT);
        debug_assert!(align >= VM::MIN_ALIGNMENT);

        if let Some(block) = self.find_free_block_local(size, align) {
            let cell = self.block_alloc(block);
            if !cell.is_zero() {
                // The fast path succeeded, so this cannot be a precise stress
                // test, which requires every allocation to take the slow path.
                debug_assert!(
                    !(*self.context.options.precise_stress
                        && self.context.options.is_stress_test_gc_enabled())
                );

                let res = allocator::align_allocation::<VM>(cell, align, offset);
                // Make sure the allocation region is within the cell.
                #[cfg(debug_assertions)]
                {
                    let cell_size = block.load_block_cell_size();
                    debug_assert!(
                        res + size <= cell + cell_size,
                        "Allocating (size = {}, align = {}, offset = {}) to the cell {} of size {}, but the end of the allocation region {} is beyond the cell end {}",
                        size, align, offset, cell, cell_size, res + size, cell + cell_size
                    );
                }
                return res;
            }
        }

        self.alloc_slow(size, align, offset)
    }

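    /// Slow path: acquire a block from the global space (which may trigger a
    /// GC poll) and allocate from it.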
    fn alloc_slow_once(&mut self, size: usize, align: usize, offset: usize) -> Address {
        if let Some(block) = self.acquire_global_block(size, align, false) {
            let addr = self.block_alloc(block);
            allocator::align_allocation::<VM>(addr, align, offset)
        } else {
            Address::ZERO
        }
    }

    fn does_thread_local_allocation(&self) -> bool {
        true
    }

    fn get_thread_local_buffer_granularity(&self) -> usize {
        Block::BYTES
    }

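    /// Slow path for precise stress testing. When `need_poll` is true, a
    /// zero-sized block request is made first so that the space gets polled.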
    fn alloc_slow_once_precise_stress(
        &mut self,
        size: usize,
        align: usize,
        offset: usize,
        need_poll: bool,
    ) -> Address {
        trace!("alloc slow precise stress s={}", size);
        if need_poll {
            // Poll the space by asking for a (zero-sized) global block.
            self.acquire_global_block(0, 0, true);
        }

        if let Some(block) = self.find_free_block_stress(size, align) {
            let cell = self.block_alloc(block);
            allocator::align_allocation::<VM>(cell, align, offset)
        } else {
            Address::ZERO
        }
    }

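    /// When a mutator is destroyed, return all of its blocks to the global
    /// abandoned lists so that they can be reused by other allocators.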
    fn on_mutator_destroy(&mut self) {
        let mut global = self.space.get_abandoned_block_lists().lock().unwrap();
        self.abandon_blocks(&mut global);
    }
}

impl<VM: VMBinding> FreeListAllocator<VM> {
    pub(crate) fn new(
        tls: VMThread,
        space: &'static MarkSweepSpace<VM>,
        context: Arc<AllocatorContext<VM>>,
    ) -> Self {
        FreeListAllocator {
            tls,
            space,
            context,
            available_blocks: new_empty_block_lists(),
            available_blocks_stress: new_empty_block_lists(),
            unswept_blocks: new_empty_block_lists(),
            consumed_blocks: new_empty_block_lists(),
        }
    }

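    /// Pop a cell off the block's free list. Returns `Address::ZERO` if the
    /// block has no free cells.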
    fn block_alloc(&mut self, block: Block) -> Address {
        let cell = block.load_free_list();
        if cell.is_zero() {
            // The block has no free cells: return a failed allocation.
            return cell;
        }
        let next_cell = unsafe { cell.load::<Address>() };
        // Clear the next-cell link stored in the cell we are about to hand out.
        unsafe { cell.store::<Address>(Address::ZERO) };
        debug_assert!(
            next_cell.is_zero() || block.includes_address(next_cell),
            "next_cell {} is not in {:?}",
            next_cell,
            block
        );
        block.store_free_list(next_cell);

        // Zero the cell before returning it.
        let cell_size = block.load_block_cell_size();
        crate::util::memory::zero(cell, cell_size);

        // Check, word by word, that the cell is actually zeroed.
        #[cfg(debug_assertions)]
        {
            let mut cursor = cell;
            while cursor < cell + cell_size {
                debug_assert_eq!(unsafe { cursor.load::<usize>() }, 0);
                cursor += crate::util::constants::BYTES_IN_ADDRESS;
            }
        }

        cell
    }

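    /// Find a block with free cells for precise stress testing, falling back to
    /// recycling local unswept blocks and then to the global pool.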
    fn find_free_block_stress(&mut self, size: usize, align: usize) -> Option<Block> {
        Self::find_free_block_with(
            &mut self.available_blocks_stress,
            &mut self.consumed_blocks,
            size,
            align,
        )
        .or_else(|| self.recycle_local_blocks(size, align, true))
        .or_else(|| self.acquire_global_block(size, align, true))
    }

    fn find_free_block_local(&mut self, size: usize, align: usize) -> Option<Block> {
        Self::find_free_block_with(
            &mut self.available_blocks,
            &mut self.consumed_blocks,
            size,
            align,
        )
        .or_else(|| self.recycle_local_blocks(size, align, false))
    }

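    /// Scan the available list of the size bin for a block with free cells.
    /// Blocks found to be full along the way are moved to the consumed list.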
    fn find_free_block_with(
        available_blocks: &mut BlockLists,
        consumed_blocks: &mut BlockLists,
        size: usize,
        align: usize,
    ) -> Option<Block> {
        let bin = mi_bin::<VM>(size, align);
        debug_assert!(bin <= MAX_BIN);

        let available = &mut available_blocks[bin];
        debug_assert!(available.size >= size);

        if !available.is_empty() {
            let mut cursor = available.first;

            while let Some(block) = cursor {
                if block.has_free_cells() {
                    return Some(block);
                }
                available.pop();
                consumed_blocks.get_mut(bin).unwrap().push(block);

                cursor = available.first;
            }
        }

        debug_assert!(available_blocks[bin].is_empty());
        None
    }

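    /// Push a block onto the appropriate available list: the stress list under
    /// precise stress testing, or the normal list otherwise.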
    fn add_to_available_blocks(&mut self, bin: usize, block: Block, stress: bool) {
        if stress {
            debug_assert!(*self.context.options.precise_stress);
            self.available_blocks_stress[bin].push(block);
        } else {
            self.available_blocks[bin].push(block);
        }
    }

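    /// Sweep one of this allocator's unswept blocks and try to reuse it. With
    /// eager sweeping there are no unswept blocks, so this always returns `None`.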
    fn recycle_local_blocks(
        &mut self,
        size: usize,
        align: usize,
        _stress_test: bool,
    ) -> Option<Block> {
        if cfg!(feature = "eager_sweeping") {
            // With eager sweeping, all blocks are swept during GC, so there is
            // nothing to recycle here.
            None
        } else {
            loop {
                let bin = mi_bin::<VM>(size, align);
                debug_assert!(self.available_blocks[bin].is_empty());
                if let Some(block) = self.unswept_blocks.get_mut(bin).unwrap().pop() {
                    block.sweep::<VM>();
                    if block.has_free_cells() {
                        self.add_to_available_blocks(
                            bin,
                            block,
                            self.context.options.is_stress_test_gc_enabled(),
                        );
                        return Some(block);
                    } else {
                        self.consumed_blocks.get_mut(bin).unwrap().push(block);
                    }
                } else {
                    return None;
                }
            }
        }
    }

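    /// Acquire a block from the global space. A fresh block needs its free list
    /// initialised before use; an abandoned block may need sweeping and may turn
    /// out to be full, in which case the loop tries again.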
    fn acquire_global_block(
        &mut self,
        size: usize,
        align: usize,
        stress_test: bool,
    ) -> Option<Block> {
        let bin = mi_bin::<VM>(size, align);
        loop {
            match self.space.acquire_block(
                self.tls,
                size,
                align,
                self.get_context().get_alloc_options(),
            ) {
                crate::policy::marksweepspace::native_ms::BlockAcquireResult::Exhausted => {
                    debug!("Acquire global block: None");
                    return None;
                }

                crate::policy::marksweepspace::native_ms::BlockAcquireResult::Fresh(block) => {
                    debug!("Acquire global block: Fresh {:?}", block);
                    self.add_to_available_blocks(bin, block, stress_test);
                    self.init_block(block, self.available_blocks[bin].size);

                    return Some(block);
                }

                crate::policy::marksweepspace::native_ms::BlockAcquireResult::AbandonedAvailable(block) => {
                    debug!("Acquire global block: AbandonedAvailable {:?}", block);
                    block.store_tls(self.tls);
                    if block.has_free_cells() {
                        self.add_to_available_blocks(bin, block, stress_test);
                        return Some(block);
                    } else {
                        self.consumed_blocks[bin].push(block);
                    }
                }

                crate::policy::marksweepspace::native_ms::BlockAcquireResult::AbandonedUnswept(block) => {
                    debug!("Acquire global block: AbandonedUnswept {:?}", block);
                    block.store_tls(self.tls);
                    block.sweep::<VM>();
                    if block.has_free_cells() {
                        self.add_to_available_blocks(bin, block, stress_test);
                        return Some(block);
                    } else {
                        self.consumed_blocks[bin].push(block);
                    }
                }
            }
        }
    }

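    /// Initialise a fresh block: thread every cell into an intrusive free list,
    /// where each cell's first word points to the previous cell. The list head
    /// stored in the block is therefore the last cell, and the first cell
    /// (at the block start) terminates the list with a null link.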
    fn init_block(&self, block: Block, cell_size: usize) {
        debug_assert_ne!(cell_size, 0);
        self.space.record_new_block(block);

        // Chain all cells in the block into a free list.
        let block_end = block.start() + Block::BYTES;
        let mut old_cell = unsafe { Address::zero() };
        let mut new_cell = block.start();

        let final_cell = loop {
            unsafe {
                new_cell.store::<Address>(old_cell);
            }
            old_cell = new_cell;
            new_cell += cell_size;
            if new_cell + cell_size > block_end {
                break old_cell;
            };
        };

        block.store_free_list(final_cell);
        block.store_block_cell_size(cell_size);
        #[cfg(feature = "malloc_native_mimalloc")]
        {
            block.store_local_free_list(Address::ZERO);
            block.store_thread_free_list(Address::ZERO);
        }

        self.store_block_tls(block);
    }

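    /// Free a cell by pushing it onto the block's local free list. Only
    /// same-thread frees are supported; a cross-thread free would need the
    /// thread-free list and is currently unreachable.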
    #[cfg(feature = "malloc_native_mimalloc")]
    fn free(&self, addr: Address) {
        assert!(!addr.is_zero(), "Attempted to free zero address.");

        use crate::util::ObjectReference;
        let block = Block::from_unaligned_address(addr);
        let block_tls = block.load_tls();

        if self.tls == block_tls {
            // Same thread as the allocator: push onto the local free list.
            let local_free = block.load_local_free_list();
            unsafe {
                addr.store(local_free);
            }
            block.store_local_free_list(addr);
        } else {
            unreachable!(
                "TLSs don't match when freeing from block {}: my tls = {:?}, block tls = {:?}",
                block.start(),
                self.tls,
                block.load_tls()
            );
        }

        // Clear the valid-object bit for the freed cell.
        crate::util::metadata::vo_bit::unset_vo_bit(unsafe {
            ObjectReference::from_raw_address_unchecked(addr)
        })
    }

    fn store_block_tls(&self, block: Block) {
        block.store_tls(self.tls);
    }

    pub(crate) fn prepare(&mut self) {}

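    /// Called during the release phase of GC. With lazy sweeping, local blocks
    /// are kept as `unswept_blocks` and swept on demand; with eager sweeping
    /// they have already been swept. All local blocks are then handed back to
    /// the global pool.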
    pub(crate) fn release(&mut self) {
        for bin in 0..MI_BIN_FULL {
            let unswept = self.unswept_blocks.get_mut(bin).unwrap();

            // With eager sweeping, blocks are swept during GC, so the unswept
            // list must be empty at this point.
            #[cfg(feature = "eager_sweeping")]
            debug_assert!(unswept.is_empty());

            let mut sweep_later = |list: &mut BlockList| {
                list.release_blocks(self.space);

                // With lazy sweeping, keep the blocks around to be swept when
                // the mutator needs them.
                if cfg!(not(feature = "eager_sweeping")) {
                    unswept.append(list);
                }
            };

            sweep_later(&mut self.available_blocks[bin]);
            sweep_later(&mut self.available_blocks_stress[bin]);
            sweep_later(&mut self.consumed_blocks[bin]);
        }

        // Give all local blocks back to the global pool during GC.
        {
            let mut global = self.space.get_abandoned_block_lists_in_gc().lock().unwrap();
            self.abandon_blocks(&mut global);
        }

        self.space.release_packet_done();
    }

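    /// Move every non-empty local block list into the corresponding global
    /// abandoned list.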
    fn abandon_blocks(&mut self, global: &mut AbandonedBlockLists) {
        for i in 0..MI_BIN_FULL {
            let available = self.available_blocks.get_mut(i).unwrap();
            if !available.is_empty() {
                global.available[i].append(available);
            }

            let available_stress = self.available_blocks_stress.get_mut(i).unwrap();
            if !available_stress.is_empty() {
                global.available[i].append(available_stress);
            }

            let consumed = self.consumed_blocks.get_mut(i).unwrap();
            if !consumed.is_empty() {
                global.consumed[i].append(consumed);
            }

            let unswept = self.unswept_blocks.get_mut(i).unwrap();
            if !unswept.is_empty() {
                global.unswept[i].append(unswept);
            }
        }
    }
}