1use std::sync::Arc;
4
5use super::allocator::AllocatorContext;
6use crate::policy::marksweepspace::native_ms::*;
7use crate::util::alloc::allocator;
8use crate::util::alloc::Allocator;
9use crate::util::linear_scan::Region;
10use crate::util::Address;
11use crate::util::VMThread;
12use crate::vm::VMBinding;
13
/// A mimalloc-style free-list allocator for `MarkSweepSpace`.
///
/// Allocation requests are mapped to size-class bins (`mi_bin`), and each bin
/// keeps thread-local lists of blocks from which fixed-size cells are popped.
#[repr(C)]
pub struct FreeListAllocator<VM: VMBinding> {
    /// The mutator thread that owns this allocator.
    pub tls: VMThread,
    /// The mark-sweep space this allocator allocates into.
    space: &'static MarkSweepSpace<VM>,
    /// Shared allocator context (holds the options consulted below).
    context: Arc<AllocatorContext<VM>>,
    /// Per-bin lists of blocks that still have free cells (normal fast path).
    pub available_blocks: BlockLists,
    /// Per-bin lists of blocks with free cells, used instead of
    /// `available_blocks` when precise stress-test GC is enabled.
    pub available_blocks_stress: BlockLists,
    /// Per-bin lists of blocks not yet swept since the last GC; swept lazily
    /// on demand (unused when the "eager_sweeping" feature is enabled).
    pub unswept_blocks: BlockLists,
    /// Per-bin lists of blocks whose cells are all in use.
    pub consumed_blocks: BlockLists,
}
34
impl<VM: VMBinding> Allocator<VM> for FreeListAllocator<VM> {
    fn get_tls(&self) -> VMThread {
        self.tls
    }

    fn get_space(&self) -> &'static dyn crate::policy::space::Space<VM> {
        self.space
    }

    fn get_context(&self) -> &AllocatorContext<VM> {
        &self.context
    }

    /// Fast path: serve the request from this thread's local block lists,
    /// falling back to `alloc_slow` when no local block has a free cell.
    fn alloc(&mut self, size: usize, align: usize, offset: usize) -> Address {
        // Requests larger than the largest bin cannot be served by this
        // allocator and must be routed elsewhere by the caller.
        debug_assert!(
            size <= MAX_BIN_SIZE,
            "Alloc request for {} bytes is too big.",
            size
        );
        debug_assert!(align <= VM::MAX_ALIGNMENT);
        debug_assert!(align >= VM::MIN_ALIGNMENT);

        if let Some(block) = self.find_free_block_local(size, align) {
            let cell = self.block_alloc(block);
            if !cell.is_zero() {
                // The fast path must not succeed when precise stress-test GC
                // is enabled: those allocations are expected to go through
                // `alloc_slow_once_precise_stress` instead.
                debug_assert!(
                    !(*self.context.options.precise_stress
                        && self.context.options.is_stress_test_gc_enabled())
                );

                let res = allocator::align_allocation::<VM>(cell, align, offset);
                // Verify the aligned allocation still fits inside the cell we
                // popped off the free list.
                #[cfg(debug_assertions)]
                {
                    let cell_size = block.load_block_cell_size();
                    debug_assert!(
                        res + size <= cell + cell_size,
                        "Allocating (size = {}, align = {}, offset = {}) to the cell {} of size {}, but the end of the allocation region {} is beyond the cell end {}",
                        size, align, offset, cell, cell_size, res + size, cell + cell_size
                    );
                }
                return res;
            }
        }

        self.alloc_slow(size, align, offset)
    }

    /// Slow path: acquire a block from the global space and allocate from it.
    /// Returns `Address::ZERO` when the space cannot provide a block.
    fn alloc_slow_once(&mut self, size: usize, align: usize, offset: usize) -> Address {
        if let Some(block) = self.acquire_global_block(size, align, false) {
            // `acquire_global_block` only returns blocks with free cells, so
            // this allocation is expected to succeed.
            let addr = self.block_alloc(block);
            allocator::align_allocation::<VM>(addr, align, offset)
        } else {
            Address::ZERO
        }
    }

    fn does_thread_local_allocation(&self) -> bool {
        true
    }

    fn get_thread_local_buffer_granularity(&self) -> usize {
        Block::BYTES
    }

    /// Slow path used when precise stress-test GC is enabled, so that a poll
    /// (and hence a possible GC) can occur per allocation.
    fn alloc_slow_once_precise_stress(
        &mut self,
        size: usize,
        align: usize,
        offset: usize,
        need_poll: bool,
    ) -> Address {
        trace!("allow slow precise stress s={}", size);
        // When a poll is requested, go through a (size 0) global block
        // acquisition first so the space gets a chance to trigger a GC.
        if need_poll {
            self.acquire_global_block(0, 0, true);
        }

        if let Some(block) = self.find_free_block_stress(size, align) {
            let cell = self.block_alloc(block);
            allocator::align_allocation::<VM>(cell, align, offset)
        } else {
            Address::ZERO
        }
    }

    /// When the mutator is destroyed, hand all its local blocks back to the
    /// space's global abandoned-block lists so they are not leaked.
    fn on_mutator_destroy(&mut self) {
        let mut global = self.space.get_abandoned_block_lists().lock().unwrap();
        self.abandon_blocks(&mut global);
    }
}
129
impl<VM: VMBinding> FreeListAllocator<VM> {
    /// Create a new allocator for mutator `tls`, allocating into `space`,
    /// with all block lists initially empty.
    pub(crate) fn new(
        tls: VMThread,
        space: &'static MarkSweepSpace<VM>,
        context: Arc<AllocatorContext<VM>>,
    ) -> Self {
        FreeListAllocator {
            tls,
            space,
            context,
            available_blocks: new_empty_block_lists(),
            available_blocks_stress: new_empty_block_lists(),
            unswept_blocks: new_empty_block_lists(),
            consumed_blocks: new_empty_block_lists(),
        }
    }

    /// Pop one cell off `block`'s free list and return it (zeroed), or
    /// `Address::ZERO` if the block has no free cells.
    fn block_alloc(&mut self, block: Block) -> Address {
        let cell = block.load_free_list();
        if cell.is_zero() {
            // No free cells in this block.
            return cell;
        }
        // The first word of a free cell holds the link to the next free cell.
        let next_cell = unsafe { cell.load::<Address>() };
        // Clear the link stored in the cell before handing the cell out.
        unsafe { cell.store::<Address>(Address::ZERO) };
        debug_assert!(
            next_cell.is_zero() || block.includes_address(next_cell),
            "next_cell {} is not in {:?}",
            next_cell,
            block
        );
        block.store_free_list(next_cell);

        // Zero the whole cell so the caller gets zero-initialised memory.
        let cell_size = block.load_block_cell_size();
        crate::util::memory::zero(cell, cell_size);

        // In debug builds, verify the cell really is fully zeroed.
        #[cfg(debug_assertions)]
        {
            let mut cursor = cell;
            while cursor < cell + cell_size {
                debug_assert_eq!(unsafe { cursor.load::<usize>() }, 0);
                cursor += crate::util::constants::BYTES_IN_ADDRESS;
            }
        }

        cell
    }

    /// Find a block with free cells for a stress-test allocation.
    /// Search order: local stress lists, then lazily-swept local blocks,
    /// then the global space.
    fn find_free_block_stress(&mut self, size: usize, align: usize) -> Option<Block> {
        Self::find_free_block_with(
            &mut self.available_blocks_stress,
            &mut self.consumed_blocks,
            size,
            align,
        )
        .or_else(|| self.recycle_local_blocks(size, align, true))
        .or_else(|| self.acquire_global_block(size, align, true))
    }

    /// Find a block with free cells using this thread's local lists only:
    /// the normal available lists first, then lazily-swept unswept blocks.
    fn find_free_block_local(&mut self, size: usize, align: usize) -> Option<Block> {
        Self::find_free_block_with(
            &mut self.available_blocks,
            &mut self.consumed_blocks,
            size,
            align,
        )
        .or_else(|| self.recycle_local_blocks(size, align, false))
    }

    /// Scan the available list for the request's bin and return the first
    /// block that still has free cells. Full blocks encountered at the head
    /// are retired to `consumed_blocks` along the way.
    fn find_free_block_with(
        available_blocks: &mut BlockLists,
        consumed_blocks: &mut BlockLists,
        size: usize,
        align: usize,
    ) -> Option<Block> {
        let bin = mi_bin::<VM>(size, align);
        debug_assert!(bin <= MAX_BIN);

        let available = &mut available_blocks[bin];
        // The bin's cell size must be large enough for the request.
        debug_assert!(available.size >= size);

        if !available.is_empty() {
            let mut cursor = available.first;

            while let Some(block) = cursor {
                if block.has_free_cells() {
                    return Some(block);
                }
                // Head block is full: move it to the consumed list and
                // examine the new head.
                available.pop();
                consumed_blocks.get_mut(bin).unwrap().push(block);

                cursor = available.first;
            }
        }

        debug_assert!(available_blocks[bin].is_empty());
        None
    }

    /// Push `block` onto the bin's available list — the stress list when
    /// `stress` is set (only legal with precise stress testing enabled),
    /// the normal list otherwise.
    fn add_to_available_blocks(&mut self, bin: usize, block: Block, stress: bool) {
        if stress {
            debug_assert!(*self.context.options.precise_stress);
            self.available_blocks_stress[bin].push(block);
        } else {
            self.available_blocks[bin].push(block);
        }
    }

    /// Lazily sweep this thread's unswept blocks for the request's bin until
    /// one with free cells turns up. With the "eager_sweeping" feature all
    /// sweeping happens during GC, so there is nothing to recycle and this
    /// returns `None` immediately.
    fn recycle_local_blocks(
        &mut self,
        size: usize,
        align: usize,
        _stress_test: bool,
    ) -> Option<Block> {
        if cfg!(feature = "eager_sweeping") {
            None
        } else {
            loop {
                let bin = mi_bin::<VM>(size, align);
                // We only get here once the bin's available list is exhausted.
                debug_assert!(self.available_blocks[bin].is_empty());
                if let Some(block) = self.unswept_blocks.get_mut(bin).unwrap().pop() {
                    // Sweep the block now that we are about to use it.
                    block.sweep::<VM>();
                    if block.has_free_cells() {
                        self.add_to_available_blocks(
                            bin,
                            block,
                            self.context.options.is_stress_test_gc_enabled(),
                        );
                        return Some(block);
                    } else {
                        // Still full after sweeping: retire it and try the
                        // next unswept block.
                        self.consumed_blocks.get_mut(bin).unwrap().push(block);
                    }
                } else {
                    // No more unswept blocks for this bin.
                    return None;
                }
            }
        }
    }

    /// Acquire a block from the global space for the request's bin.
    /// Fresh blocks are initialised with the bin's cell size; abandoned
    /// blocks are re-owned by this thread (and swept first if unswept).
    /// Returns `None` when the space is exhausted.
    fn acquire_global_block(
        &mut self,
        size: usize,
        align: usize,
        stress_test: bool,
    ) -> Option<Block> {
        let bin = mi_bin::<VM>(size, align);
        // Loop because an abandoned block may turn out to be full, in which
        // case we retire it and ask the space for another one.
        loop {
            match self.space.acquire_block(self.tls, size, align, self.get_context().get_alloc_options()) {
                crate::policy::marksweepspace::native_ms::BlockAcquireResult::Exhausted => {
                    debug!("Acquire global block: None");
                    return None;
                }

                crate::policy::marksweepspace::native_ms::BlockAcquireResult::Fresh(block) => {
                    debug!("Acquire global block: Fresh {:?}", block);
                    self.add_to_available_blocks(bin, block, stress_test);
                    // A fresh block needs its free list built for this bin's
                    // cell size.
                    self.init_block(block, self.available_blocks[bin].size);

                    return Some(block);
                }

                crate::policy::marksweepspace::native_ms::BlockAcquireResult::AbandonedAvailable(block) => {
                    debug!("Acquire global block: AbandonedAvailable {:?}", block);
                    block.store_tls(self.tls);
                    if block.has_free_cells() {
                        self.add_to_available_blocks(bin, block, stress_test);
                        return Some(block);
                    } else {
                        self.consumed_blocks[bin].push(block);
                    }
                }

                crate::policy::marksweepspace::native_ms::BlockAcquireResult::AbandonedUnswept(block) => {
                    debug!("Acquire global block: AbandonedUnswep {:?}", block);
                    block.store_tls(self.tls);
                    block.sweep::<VM>();
                    if block.has_free_cells() {
                        self.add_to_available_blocks(bin, block, stress_test);
                        return Some(block);
                    } else {
                        self.consumed_blocks[bin].push(block);
                    }
                }
            }
        }
    }

    /// Initialise a fresh block: register it with the space, thread an
    /// intrusive free list of `cell_size` cells through it, record the cell
    /// size, and tag the block with this thread's tls.
    fn init_block(&self, block: Block, cell_size: usize) {
        debug_assert_ne!(cell_size, 0);
        self.space.record_new_block(block);

        // Build the free list front to back: each cell's first word points to
        // the previously linked cell, so the list head ends up being the last
        // cell that fits in the block, and the first cell links to ZERO.
        let block_end = block.start() + Block::BYTES;
        let mut old_cell = unsafe { Address::zero() };
        let mut new_cell = block.start();

        let final_cell = loop {
            unsafe {
                new_cell.store::<Address>(old_cell);
            }
            old_cell = new_cell;
            new_cell += cell_size;
            if new_cell + cell_size > block_end {
                break old_cell;
            };
        };

        block.store_free_list(final_cell);
        block.store_block_cell_size(cell_size);
        // The mimalloc-style local/thread free lists start empty.
        #[cfg(feature = "malloc_native_mimalloc")]
        {
            block.store_local_free_list(Address::ZERO);
            block.store_thread_free_list(Address::ZERO);
        }

        self.store_block_tls(block);
    }

    /// Free `addr` back onto its block's local free list (mimalloc-style
    /// local free). Freeing a cell from a thread other than the block's
    /// owner is currently unsupported and aborts.
    #[cfg(feature = "malloc_native_mimalloc")]
    fn free(&self, addr: Address) {
        assert!(!addr.is_zero(), "Attempted to free zero address.");

        use crate::util::ObjectReference;
        let block = Block::from_unaligned_address(addr);
        let block_tls = block.load_tls();

        if self.tls == block_tls {
            // This thread owns the block: push the cell onto the block's
            // local free list.
            let local_free = block.load_local_free_list();
            unsafe {
                addr.store(local_free);
            }
            block.store_local_free_list(addr);
        } else {
            unreachable!(
                "tlss don't match freeing from block {}, my tls = {:?}, block tls = {:?}",
                block.start(),
                self.tls,
                block.load_tls()
            );

        }

        // The cell no longer holds a live object.
        crate::util::metadata::vo_bit::unset_vo_bit(unsafe {
            ObjectReference::from_raw_address_unchecked(addr)
        })
    }

    /// Record this thread as the owner of `block`.
    fn store_block_tls(&self, block: Block) {
        block.store_tls(self.tls);
    }

    /// GC prepare hook. Nothing to do for this allocator.
    pub(crate) fn prepare(&mut self) {}

    /// GC release hook: notify the space about all local blocks and either
    /// queue them for lazy sweeping (default) or rely on the GC's eager
    /// sweeping, then abandon everything to the global in-GC lists.
    pub(crate) fn release(&mut self) {
        for bin in 0..MI_BIN_FULL {
            let unswept = self.unswept_blocks.get_mut(bin).unwrap();

            // With eager sweeping, sweeping happens during GC, so no unswept
            // blocks should have accumulated.
            #[cfg(feature = "eager_sweeping")]
            debug_assert!(unswept.is_empty());

            let mut sweep_later = |list: &mut BlockList| {
                list.release_blocks(self.space);

                // Without eager sweeping, defer the sweep: queue the blocks
                // to be swept lazily by `recycle_local_blocks`.
                if cfg!(not(feature = "eager_sweeping")) {
                    unswept.append(list);
                }
            };

            sweep_later(&mut self.available_blocks[bin]);
            sweep_later(&mut self.available_blocks_stress[bin]);
            sweep_later(&mut self.consumed_blocks[bin]);
        }

        // Hand all remaining local blocks to the global in-GC lists.
        {
            let mut global = self.space.get_abandoned_block_lists_in_gc().lock().unwrap();
            self.abandon_blocks(&mut global);
        }

        self.space.release_packet_done();
    }

    /// Move all of this allocator's local blocks into `global`, list by list
    /// and bin by bin. Stress-test available blocks are merged into the
    /// plain available lists (the global side keeps no stress lists).
    fn abandon_blocks(&mut self, global: &mut AbandonedBlockLists) {
        for i in 0..MI_BIN_FULL {
            let available = self.available_blocks.get_mut(i).unwrap();
            if !available.is_empty() {
                global.available[i].append(available);
            }

            let available_stress = self.available_blocks_stress.get_mut(i).unwrap();
            if !available_stress.is_empty() {
                global.available[i].append(available_stress);
            }

            let consumed = self.consumed_blocks.get_mut(i).unwrap();
            if !consumed.is_empty() {
                global.consumed[i].append(consumed);
            }

            let unswept = self.unswept_blocks.get_mut(i).unwrap();
            if !unswept.is_empty() {
                global.unswept[i].append(unswept);
            }
        }
    }
}