use super::*;
use crate::util::constants::{BYTES_IN_PAGE, BYTES_IN_WORD, LOG_BITS_IN_BYTE};
use crate::util::conversions::raw_align_up;
use crate::util::heap::layout::vm_layout::BYTES_IN_CHUNK;
use crate::util::metadata::metadata_val_traits::*;
use crate::util::metadata::side_metadata::layout::*;
#[cfg(feature = "vo_bit")]
use crate::util::metadata::vo_bit::VO_BIT_SIDE_METADATA_SPEC;
use crate::util::os::*;
use crate::util::Address;
use num_traits::FromPrimitive;
use ranges::BitByteRange;
use std::fmt;
use std::sync::atomic::{AtomicU8, Ordering};

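/// A side metadata specification. It describes one kind of per-region metadata
/// kept on the side: its `name`, whether it is `is_global` or policy-local, its
/// `offset` within the side metadata address range, how many bits each data
/// region gets (`log_num_of_bits`), and the granularity of a region
/// (`log_bytes_in_region`). For example, the tests at the bottom of this file
/// declare a one-bit-per-8-bytes global spec as
/// `SideMetadataSpec { name: "test_spec", is_global: true, offset: 0, log_num_of_bits: 0, log_bytes_in_region: 3 }`.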
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
pub struct SideMetadataSpec {
    pub name: &'static str,
    pub is_global: bool,
    pub offset: usize,
    pub log_num_of_bits: usize,
    pub log_bytes_in_region: usize,
}

impl SideMetadataSpec {
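    /// Whether this spec is stored in a contiguous side metadata address range.
    /// Global specs always are; on 64-bit targets, local specs are contiguous too.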
    pub const fn uses_contiguous_side_metadata(&self) -> bool {
        self.is_global || cfg!(target_pointer_width = "64")
    }

    pub const fn uses_chunked_side_metadata(&self) -> bool {
        !self.uses_contiguous_side_metadata()
    }

    pub fn get_starting_address(&self) -> Address {
        debug_assert!(self.uses_contiguous_side_metadata());
        let base = global_side_metadata_base_address();
        base + self.offset
    }

    pub const fn get_offset_for_chunked(&self) -> usize {
        debug_assert!(self.uses_chunked_side_metadata());
        self.offset
    }

    #[cfg(target_pointer_width = "64")]
    pub const fn upper_bound_offset(&self) -> usize {
        debug_assert!(self.uses_contiguous_side_metadata());
        self.offset + metadata_address_range_size(self)
    }

    #[cfg(target_pointer_width = "32")]
    pub const fn upper_bound_offset(&self) -> usize {
        if self.uses_contiguous_side_metadata() {
            self.offset + metadata_address_range_size(self)
        } else {
            self.offset + metadata_bytes_per_chunk(self.log_bytes_in_region, self.log_num_of_bits)
        }
    }

    pub fn upper_bound_address_for_contiguous(&self) -> Address {
        debug_assert!(self.uses_contiguous_side_metadata());
        self.get_starting_address() + metadata_address_range_size(self)
    }

    #[cfg(target_pointer_width = "32")]
    pub fn upper_bound_address_for_chunked(&self, data_addr: Address) -> Address {
        debug_assert!(self.uses_chunked_side_metadata());
        address_to_meta_chunk_addr(data_addr) + self.upper_bound_offset()
    }

    #[cfg(debug_assertions)]
    pub(crate) fn assert_metadata_mapped(&self, data_addr: Address) {
        let meta_start = address_to_meta_address(self, data_addr).align_down(BYTES_IN_PAGE);

        trace!(
            "ensure_metadata_is_mapped({}).meta_start({})",
            data_addr,
            meta_start
        );

        OS::panic_if_unmapped(meta_start, BYTES_IN_PAGE);
    }

    #[cfg(debug_assertions)]
    pub(crate) fn are_different_metadata_bits(&self, addr1: Address, addr2: Address) -> bool {
        let a1 = address_to_meta_address(self, addr1);
        let a2 = address_to_meta_address(self, addr2);
        let s1 = meta_byte_lshift(self, addr1);
        let s2 = meta_byte_lshift(self, addr2);
        (a1, s1) != (a2, s2)
    }

    #[cfg(debug_assertions)]
    fn assert_value_type<T: MetadataValue>(&self, val: Option<T>) {
        let log_b = self.log_num_of_bits;
        match log_b {
            _ if log_b < 3 => {
                assert_eq!(T::LOG2, 3);
                if let Some(v) = val {
                    assert!(
                        v.to_u8().unwrap() < (1 << (1 << log_b)),
                        "Input value {:?} is invalid for the spec {:?}",
                        v,
                        self
                    );
                }
            }
            3..=6 => assert_eq!(T::LOG2, log_b as u32),
            _ => unreachable!("side metadata > {}-bits is not supported", 1 << log_b),
        }
    }

    pub(crate) fn is_mapped(&self, data_addr: Address) -> bool {
        use crate::MMAPPER;
        let meta_addr = address_to_meta_address(self, data_addr);
        MMAPPER.is_mapped_address(meta_addr)
    }

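    /// Zero the bits in the half-open metadata bit range
    /// `[meta_start_addr:meta_start_bit, meta_end_addr:meta_end_bit)`.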
    pub(crate) fn zero_meta_bits(
        meta_start_addr: Address,
        meta_start_bit: u8,
        meta_end_addr: Address,
        meta_end_bit: u8,
    ) {
        let mut visitor = |range| {
            match range {
                BitByteRange::Bytes { start, end } => {
                    crate::util::memory::zero(start, end - start);
                    false
                }
                BitByteRange::BitsInByte {
                    addr,
                    bit_start,
                    bit_end,
                } => {
                    // The mask has 1s everywhere except bits [bit_start, bit_end),
                    // so the fetch_and below clears exactly that bit range.
                    let mask: u8 =
                        u8::MAX.checked_shl(bit_end as u32).unwrap_or(0) | !(u8::MAX << bit_start);
                    unsafe { addr.as_ref::<AtomicU8>() }.fetch_and(mask, Ordering::SeqCst);
                    false
                }
            }
        };
        ranges::break_bit_range(
            meta_start_addr,
            meta_start_bit,
            meta_end_addr,
            meta_end_bit,
            true,
            &mut visitor,
        );
    }

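    /// Set (to 1) the bits in the half-open metadata bit range
    /// `[meta_start_addr:meta_start_bit, meta_end_addr:meta_end_bit)`.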
    pub(crate) fn set_meta_bits(
        meta_start_addr: Address,
        meta_start_bit: u8,
        meta_end_addr: Address,
        meta_end_bit: u8,
    ) {
        let mut visitor = |range| {
            match range {
                BitByteRange::Bytes { start, end } => {
                    crate::util::memory::set(start, 0xff, end - start);
                    false
                }
                BitByteRange::BitsInByte {
                    addr,
                    bit_start,
                    bit_end,
                } => {
                    // The mask has 1s exactly in bits [bit_start, bit_end),
                    // so the fetch_or below sets only that bit range.
                    let mask: u8 = !(u8::MAX.checked_shl(bit_end as u32).unwrap_or(0))
                        & (u8::MAX << bit_start);
                    unsafe { addr.as_ref::<AtomicU8>() }.fetch_or(mask, Ordering::SeqCst);
                    false
                }
            }
        };
        ranges::break_bit_range(
            meta_start_addr,
            meta_start_bit,
            meta_end_addr,
            meta_end_bit,
            true,
            &mut visitor,
        );
    }

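    /// Apply `update_meta_bits` to the metadata bit range that maps the data
    /// range `[start, start + size)`. Contiguous metadata is updated in one
    /// call; chunked (32-bit local) metadata is split at chunk boundaries and
    /// each chunk's metadata is updated separately.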
    pub(super) fn bulk_update_metadata(
        &self,
        start: Address,
        size: usize,
        update_meta_bits: &impl Fn(Address, u8, Address, u8),
    ) {
        let update_contiguous = |data_start: Address, data_bytes: usize| {
            if data_bytes == 0 {
                return;
            }
            let meta_start = address_to_meta_address(self, data_start);
            let meta_start_shift = meta_byte_lshift(self, data_start);
            let meta_end = address_to_meta_address(self, data_start + data_bytes);
            let meta_end_shift = meta_byte_lshift(self, data_start + data_bytes);
            update_meta_bits(meta_start, meta_start_shift, meta_end, meta_end_shift);
        };

        #[cfg(target_pointer_width = "32")]
        let update_discontiguous = |data_start: Address, data_bytes: usize| {
            use crate::util::constants::BITS_IN_BYTE;
            if data_bytes == 0 {
                return;
            }
            debug_assert_eq!(
                data_start.align_down(BYTES_IN_CHUNK),
                (data_start + data_bytes - 1).align_down(BYTES_IN_CHUNK),
                "The data to be zeroed in discontiguous specs needs to be in the same chunk"
            );
            let meta_start = address_to_meta_address(self, data_start);
            let meta_start_shift = meta_byte_lshift(self, data_start);
            let meta_total_bits = (data_bytes >> self.log_bytes_in_region) << self.log_num_of_bits;
            let meta_delta_bytes = meta_total_bits >> LOG_BITS_IN_BYTE;
            let meta_delta_bits: u8 = (meta_total_bits % BITS_IN_BYTE) as u8;
            let (meta_end, meta_end_shift) = {
                let mut end_addr = meta_start + meta_delta_bytes;
                let mut end_bit = meta_start_shift + meta_delta_bits;
                if end_bit >= BITS_IN_BYTE as u8 {
                    end_bit -= BITS_IN_BYTE as u8;
                    end_addr += 1usize;
                }
                (end_addr, end_bit)
            };

            update_meta_bits(meta_start, meta_start_shift, meta_end, meta_end_shift);
        };

        if cfg!(target_pointer_width = "64") || self.is_global {
            update_contiguous(start, size);
        }
        #[cfg(target_pointer_width = "32")]
        if !self.is_global {
            let chunk_num = ((start + size).align_down(BYTES_IN_CHUNK)
                - start.align_down(BYTES_IN_CHUNK))
                / BYTES_IN_CHUNK;
            if chunk_num == 0 {
                update_discontiguous(start, size);
            } else {
                let second_data_chunk = start.align_up(BYTES_IN_CHUNK);
                update_discontiguous(start, second_data_chunk - start);

                let last_data_chunk = (start + size).align_down(BYTES_IN_CHUNK);
                update_discontiguous(last_data_chunk, start + size - last_data_chunk);
                let mut next_data_chunk = second_data_chunk;

                while next_data_chunk != last_data_chunk {
                    update_discontiguous(next_data_chunk, BYTES_IN_CHUNK);
                    next_data_chunk += BYTES_IN_CHUNK;
                }
            }
        }
    }

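    /// Bulk-zero the metadata for the data range `[start, start + size)`.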
    pub fn bzero_metadata(&self, start: Address, size: usize) {
        #[cfg(feature = "extreme_assertions")]
        let _lock = sanity::SANITY_LOCK.lock().unwrap();

        #[cfg(feature = "extreme_assertions")]
        sanity::verify_bzero(self, start, size);

        self.bulk_update_metadata(start, size, &Self::zero_meta_bits)
    }

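    /// Bulk-set (to all ones) the metadata for the data range `[start, start + size)`.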
    pub fn bset_metadata(&self, start: Address, size: usize) {
        #[cfg(feature = "extreme_assertions")]
        let _lock = sanity::SANITY_LOCK.lock().unwrap();

        #[cfg(feature = "extreme_assertions")]
        sanity::verify_bset(self, start, size);

        self.bulk_update_metadata(start, size, &Self::set_meta_bits)
    }

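    /// Bulk-copy the metadata for the data range `[start, start + size)` from
    /// `other` into `self`. Both specs must share the same `log_num_of_bits`
    /// and `log_bytes_in_region` (asserted below), so source and destination
    /// bit positions line up. Only valid for contiguous metadata.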
    pub fn bcopy_metadata_contiguous(&self, start: Address, size: usize, other: &SideMetadataSpec) {
        #[cfg(feature = "extreme_assertions")]
        let _lock = sanity::SANITY_LOCK.lock().unwrap();

        #[cfg(feature = "extreme_assertions")]
        sanity::verify_bcopy(self, start, size, other);

        debug_assert_eq!(other.log_bytes_in_region, self.log_bytes_in_region);
        debug_assert_eq!(other.log_num_of_bits, self.log_num_of_bits);

        let dst_meta_start_addr = address_to_meta_address(self, start);
        let dst_meta_start_bit = meta_byte_lshift(self, start);
        let dst_meta_end_addr = address_to_meta_address(self, start + size);
        let dst_meta_end_bit = meta_byte_lshift(self, start + size);

        let src_meta_start_addr = address_to_meta_address(other, start);
        let src_meta_start_bit = meta_byte_lshift(other, start);

        debug_assert_eq!(dst_meta_start_bit, src_meta_start_bit);

        let mut visitor = |range| {
            match range {
                BitByteRange::Bytes {
                    start: dst_start,
                    end: dst_end,
                } => unsafe {
                    let byte_offset = dst_start - dst_meta_start_addr;
                    let src_start = src_meta_start_addr + byte_offset;
                    let size = dst_end - dst_start;
                    std::ptr::copy::<u8>(src_start.to_ptr(), dst_start.to_mut_ptr(), size);
                    false
                },
                BitByteRange::BitsInByte {
                    addr: dst,
                    bit_start,
                    bit_end,
                } => {
                    let byte_offset = dst - dst_meta_start_addr;
                    let src = src_meta_start_addr + byte_offset;
                    // Bits within [bit_start, bit_end) come from the source byte;
                    // the remaining bits keep their destination values.
                    let mask: u8 = !(u8::MAX.checked_shl(bit_end as u32).unwrap_or(0))
                        & (u8::MAX << bit_start);
                    let old_src = unsafe { src.as_ref::<AtomicU8>() }.load(Ordering::Relaxed);
                    let old_dst = unsafe { dst.as_ref::<AtomicU8>() }.load(Ordering::Relaxed);
                    let new = (old_src & mask) | (old_dst & !mask);
                    unsafe { dst.as_ref::<AtomicU8>() }.store(new, Ordering::Relaxed);
                    false
                }
            }
        };

        ranges::break_bit_range(
            dst_meta_start_addr,
            dst_meta_start_bit,
            dst_meta_end_addr,
            dst_meta_end_bit,
            true,
            &mut visitor,
        );
    }

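    /// Shared wrapper around every metadata access. Under `extreme_assertions`
    /// it holds the sanity lock for the whole access; in debug builds it checks
    /// that the input value fits the spec (and, under `extreme_assertions`,
    /// that the metadata is mapped); it then runs `access_func` and hands the
    /// result to `verify_func` before returning it.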
    #[allow(unused_variables)]
    fn side_metadata_access<
        const CHECK_VALUE: bool,
        T: MetadataValue,
        R: Copy,
        F: FnOnce() -> R,
        V: FnOnce(R),
    >(
        &self,
        data_addr: Address,
        input: Option<T>,
        access_func: F,
        verify_func: V,
    ) -> R {
        #[cfg(feature = "extreme_assertions")]
        let _lock = sanity::SANITY_LOCK.lock().unwrap();

        #[cfg(debug_assertions)]
        {
            if CHECK_VALUE {
                self.assert_value_type::<T>(input);
            }
            #[cfg(feature = "extreme_assertions")]
            self.assert_metadata_mapped(data_addr);
        }

        let ret = access_func();

        if CHECK_VALUE {
            verify_func(ret);
        }

        ret
    }

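    /// Non-atomic load of this spec's metadata for `data_addr`. Unsafe: the
    /// caller must ensure there is no concurrent access to the same metadata.
    /// A minimal sketch, assuming the spec is mapped for this address (`SPEC`
    /// and `addr` are placeholders):
    ///
    /// ```ignore
    /// unsafe { SPEC.store::<u8>(addr, 1) };
    /// assert_eq!(unsafe { SPEC.load::<u8>(addr) }, 1);
    /// ```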
    pub unsafe fn load<T: MetadataValue>(&self, data_addr: Address) -> T {
        self.side_metadata_access::<true, T, _, _, _>(
            data_addr,
            None,
            || {
                let meta_addr = address_to_meta_address(self, data_addr);
                let bits_num_log = self.log_num_of_bits;
                if bits_num_log < 3 {
                    let lshift = meta_byte_lshift(self, data_addr);
                    let mask = meta_byte_mask(self) << lshift;
                    let byte_val = meta_addr.load::<u8>();

                    FromPrimitive::from_u8((byte_val & mask) >> lshift).unwrap()
                } else {
                    meta_addr.load::<T>()
                }
            },
            |_v| {
                #[cfg(feature = "extreme_assertions")]
                sanity::verify_load(self, data_addr, _v);
            },
        )
    }

    pub unsafe fn store<T: MetadataValue>(&self, data_addr: Address, metadata: T) {
        self.side_metadata_access::<true, T, _, _, _>(
            data_addr,
            Some(metadata),
            || {
                let meta_addr = address_to_meta_address(self, data_addr);
                let bits_num_log = self.log_num_of_bits;
                if bits_num_log < 3 {
                    let lshift = meta_byte_lshift(self, data_addr);
                    let mask = meta_byte_mask(self) << lshift;
                    let old_val = meta_addr.load::<u8>();
                    let new_val = (old_val & !mask) | (metadata.to_u8().unwrap() << lshift);

                    meta_addr.store::<u8>(new_val);
                } else {
                    meta_addr.store::<T>(metadata);
                }
            },
            |_| {
                #[cfg(feature = "extreme_assertions")]
                sanity::verify_store(self, data_addr, metadata);
            },
        )
    }

    pub fn load_atomic<T: MetadataValue>(&self, data_addr: Address, order: Ordering) -> T {
        self.side_metadata_access::<true, T, _, _, _>(
            data_addr,
            None,
            || {
                let meta_addr = address_to_meta_address(self, data_addr);
                let bits_num_log = self.log_num_of_bits;
                if bits_num_log < 3 {
                    let lshift = meta_byte_lshift(self, data_addr);
                    let mask = meta_byte_mask(self) << lshift;
                    let byte_val = unsafe { meta_addr.atomic_load::<AtomicU8>(order) };
                    FromPrimitive::from_u8((byte_val & mask) >> lshift).unwrap()
                } else {
                    unsafe { T::load_atomic(meta_addr, order) }
                }
            },
            |_v| {
                #[cfg(feature = "extreme_assertions")]
                sanity::verify_load(self, data_addr, _v);
            },
        )
    }

    pub fn store_atomic<T: MetadataValue>(&self, data_addr: Address, metadata: T, order: Ordering) {
        self.side_metadata_access::<true, T, _, _, _>(
            data_addr,
            Some(metadata),
            || {
                let meta_addr = address_to_meta_address(self, data_addr);
                let bits_num_log = self.log_num_of_bits;
                if bits_num_log < 3 {
                    let lshift = meta_byte_lshift(self, data_addr);
                    let mask = meta_byte_mask(self) << lshift;
                    let metadata_u8 = metadata.to_u8().unwrap();
                    let _ = unsafe {
                        <u8 as MetadataValue>::fetch_update(meta_addr, order, order, |v: u8| {
                            Some((v & !mask) | (metadata_u8 << lshift))
                        })
                    };
                } else {
                    unsafe {
                        T::store_atomic(meta_addr, metadata, order);
                    }
                }
            },
            |_| {
                #[cfg(feature = "extreme_assertions")]
                sanity::verify_store(self, data_addr, metadata);
            },
        )
    }

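    /// Non-atomically zero this spec's metadata for `data_addr`, dispatching on
    /// the spec's bit width. Unsafe for the same reason as `store`.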
    pub unsafe fn set_zero(&self, data_addr: Address) {
        use num_traits::Zero;
        match self.log_num_of_bits {
            0..=3 => self.store(data_addr, u8::zero()),
            4 => self.store(data_addr, u16::zero()),
            5 => self.store(data_addr, u32::zero()),
            6 => self.store(data_addr, u64::zero()),
            _ => unreachable!(),
        }
    }

    pub fn set_zero_atomic(&self, data_addr: Address, order: Ordering) {
        use num_traits::Zero;
        match self.log_num_of_bits {
            0..=3 => self.store_atomic(data_addr, u8::zero(), order),
            4 => self.store_atomic(data_addr, u16::zero(), order),
            5 => self.store_atomic(data_addr, u32::zero(), order),
            6 => self.store_atomic(data_addr, u64::zero(), order),
            _ => unreachable!(),
        }
    }

    pub unsafe fn set_raw_byte_atomic(&self, data_addr: Address, order: Ordering) {
        debug_assert!(self.log_num_of_bits < 3);
        cfg_if::cfg_if! {
            if #[cfg(feature = "extreme_assertions")] {
                self.store_atomic::<u8>(data_addr, 1, order)
            } else {
                self.side_metadata_access::<false, u8, _, _, _>(
                    data_addr,
                    Some(1u8),
                    || {
                        let meta_addr = address_to_meta_address(self, data_addr);
                        u8::store_atomic(meta_addr, 0xffu8, order);
                    },
                    |_| {}
                )
            }
        }
    }

    pub unsafe fn load_raw_byte(&self, data_addr: Address) -> u8 {
        debug_assert!(self.log_num_of_bits < 3);
        self.side_metadata_access::<false, u8, _, _, _>(
            data_addr,
            None,
            || {
                let meta_addr = address_to_meta_address(self, data_addr);
                meta_addr.load::<u8>()
            },
            |_| {},
        )
    }

    pub unsafe fn load_raw_word(&self, data_addr: Address) -> usize {
        use crate::util::constants::*;
        debug_assert!(self.log_num_of_bits < (LOG_BITS_IN_BYTE + LOG_BYTES_IN_ADDRESS) as usize);
        self.side_metadata_access::<false, usize, _, _, _>(
            data_addr,
            None,
            || {
                let meta_addr = address_to_meta_address(self, data_addr);
                let aligned_meta_addr = meta_addr.align_down(BYTES_IN_ADDRESS);
                aligned_meta_addr.load::<usize>()
            },
            |_| {},
        )
    }

    pub fn compare_exchange_atomic<T: MetadataValue>(
        &self,
        data_addr: Address,
        old_metadata: T,
        new_metadata: T,
        success_order: Ordering,
        failure_order: Ordering,
    ) -> std::result::Result<T, T> {
        self.side_metadata_access::<true, T, _, _, _>(
            data_addr,
            Some(new_metadata),
            || {
                let meta_addr = address_to_meta_address(self, data_addr);
                let bits_num_log = self.log_num_of_bits;
                if bits_num_log < 3 {
                    let lshift = meta_byte_lshift(self, data_addr);
                    let mask = meta_byte_mask(self) << lshift;

                    let real_old_byte = unsafe { meta_addr.atomic_load::<AtomicU8>(success_order) };
                    let expected_old_byte =
                        (real_old_byte & !mask) | ((old_metadata.to_u8().unwrap()) << lshift);
                    let expected_new_byte =
                        (expected_old_byte & !mask) | ((new_metadata.to_u8().unwrap()) << lshift);

                    unsafe {
                        meta_addr.compare_exchange::<AtomicU8>(
                            expected_old_byte,
                            expected_new_byte,
                            success_order,
                            failure_order,
                        )
                    }
                    .map(|x| FromPrimitive::from_u8((x & mask) >> lshift).unwrap())
                    .map_err(|x| FromPrimitive::from_u8((x & mask) >> lshift).unwrap())
                } else {
                    unsafe {
                        T::compare_exchange(
                            meta_addr,
                            old_metadata,
                            new_metadata,
                            success_order,
                            failure_order,
                        )
                    }
                }
            },
            |_res| {
                #[cfg(feature = "extreme_assertions")]
                if _res.is_ok() {
                    sanity::verify_store(self, data_addr, new_metadata);
                }
            },
        )
    }

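    /// Read-modify-write helper for sub-byte metadata. It applies `update` to
    /// the bits this spec occupies within a byte, using `fetch_update` on the
    /// whole byte, and returns the old value of just those bits (unshifted).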
    fn fetch_ops_on_bits<F: Fn(u8) -> u8>(
        &self,
        data_addr: Address,
        meta_addr: Address,
        set_order: Ordering,
        fetch_order: Ordering,
        update: F,
    ) -> u8 {
        let lshift = meta_byte_lshift(self, data_addr);
        let mask = meta_byte_mask(self) << lshift;

        let old_raw_byte = unsafe {
            <u8 as MetadataValue>::fetch_update(
                meta_addr,
                set_order,
                fetch_order,
                |raw_byte: u8| {
                    let old_val = (raw_byte & mask) >> lshift;
                    let new_val = update(old_val);
                    let new_raw_byte = (raw_byte & !mask) | ((new_val << lshift) & mask);
                    Some(new_raw_byte)
                },
            )
        }
        .unwrap();
        (old_raw_byte & mask) >> lshift
    }

    pub fn fetch_add_atomic<T: MetadataValue>(
        &self,
        data_addr: Address,
        val: T,
        order: Ordering,
    ) -> T {
        self.side_metadata_access::<true, T, _, _, _>(
            data_addr,
            Some(val),
            || {
                let meta_addr = address_to_meta_address(self, data_addr);
                let bits_num_log = self.log_num_of_bits;
                if bits_num_log < 3 {
                    FromPrimitive::from_u8(self.fetch_ops_on_bits(
                        data_addr,
                        meta_addr,
                        order,
                        order,
                        |x: u8| x.wrapping_add(val.to_u8().unwrap()),
                    ))
                    .unwrap()
                } else {
                    unsafe { T::fetch_add(meta_addr, val, order) }
                }
            },
            |_old_val| {
                #[cfg(feature = "extreme_assertions")]
                sanity::verify_update::<T>(self, data_addr, _old_val, _old_val.wrapping_add(&val))
            },
        )
    }

    pub fn fetch_sub_atomic<T: MetadataValue>(
        &self,
        data_addr: Address,
        val: T,
        order: Ordering,
    ) -> T {
        self.side_metadata_access::<true, T, _, _, _>(
            data_addr,
            Some(val),
            || {
                let meta_addr = address_to_meta_address(self, data_addr);
                if self.log_num_of_bits < 3 {
                    FromPrimitive::from_u8(self.fetch_ops_on_bits(
                        data_addr,
                        meta_addr,
                        order,
                        order,
                        |x: u8| x.wrapping_sub(val.to_u8().unwrap()),
                    ))
                    .unwrap()
                } else {
                    unsafe { T::fetch_sub(meta_addr, val, order) }
                }
            },
            |_old_val| {
                #[cfg(feature = "extreme_assertions")]
                sanity::verify_update::<T>(self, data_addr, _old_val, _old_val.wrapping_sub(&val))
            },
        )
    }

    pub fn fetch_and_atomic<T: MetadataValue>(
        &self,
        data_addr: Address,
        val: T,
        order: Ordering,
    ) -> T {
        self.side_metadata_access::<true, T, _, _, _>(
            data_addr,
            Some(val),
            || {
                let meta_addr = address_to_meta_address(self, data_addr);
                if self.log_num_of_bits < 3 {
                    let lshift = meta_byte_lshift(self, data_addr);
                    let mask = meta_byte_mask(self) << lshift;
                    // Bits outside the mask are 1 in `rhs`, so the AND leaves them unchanged.
                    let rhs = (val.to_u8().unwrap() << lshift) | !mask;
                    let old_raw_byte =
                        unsafe { <u8 as MetadataValue>::fetch_and(meta_addr, rhs, order) };
                    let old_val = (old_raw_byte & mask) >> lshift;
                    FromPrimitive::from_u8(old_val).unwrap()
                } else {
                    unsafe { T::fetch_and(meta_addr, val, order) }
                }
            },
            |_old_val| {
                #[cfg(feature = "extreme_assertions")]
                sanity::verify_update::<T>(self, data_addr, _old_val, _old_val.bitand(val))
            },
        )
    }

    pub fn fetch_or_atomic<T: MetadataValue>(
        &self,
        data_addr: Address,
        val: T,
        order: Ordering,
    ) -> T {
        self.side_metadata_access::<true, T, _, _, _>(
            data_addr,
            Some(val),
            || {
                let meta_addr = address_to_meta_address(self, data_addr);
                if self.log_num_of_bits < 3 {
                    let lshift = meta_byte_lshift(self, data_addr);
                    let mask = meta_byte_mask(self) << lshift;
                    // Bits outside the mask are 0 in `rhs`, so the OR leaves them unchanged.
                    let rhs = (val.to_u8().unwrap() << lshift) & mask;
                    let old_raw_byte =
                        unsafe { <u8 as MetadataValue>::fetch_or(meta_addr, rhs, order) };
                    let old_val = (old_raw_byte & mask) >> lshift;
                    FromPrimitive::from_u8(old_val).unwrap()
                } else {
                    unsafe { T::fetch_or(meta_addr, val, order) }
                }
            },
            |_old_val| {
                #[cfg(feature = "extreme_assertions")]
                sanity::verify_update::<T>(self, data_addr, _old_val, _old_val.bitor(val))
            },
        )
    }

    pub fn fetch_update_atomic<T: MetadataValue, F: FnMut(T) -> Option<T> + Copy>(
        &self,
        data_addr: Address,
        set_order: Ordering,
        fetch_order: Ordering,
        mut f: F,
    ) -> std::result::Result<T, T> {
        self.side_metadata_access::<true, T, _, _, _>(
            data_addr,
            None,
            move || -> std::result::Result<T, T> {
                let meta_addr = address_to_meta_address(self, data_addr);
                if self.log_num_of_bits < 3 {
                    let lshift = meta_byte_lshift(self, data_addr);
                    let mask = meta_byte_mask(self) << lshift;

                    unsafe {
                        <u8 as MetadataValue>::fetch_update(
                            meta_addr,
                            set_order,
                            fetch_order,
                            |raw_byte: u8| {
                                let old_val = (raw_byte & mask) >> lshift;
                                f(FromPrimitive::from_u8(old_val).unwrap()).map(|new_val| {
                                    (raw_byte & !mask)
                                        | ((new_val.to_u8().unwrap() << lshift) & mask)
                                })
                            },
                        )
                    }
                    .map(|x| FromPrimitive::from_u8((x & mask) >> lshift).unwrap())
                    .map_err(|x| FromPrimitive::from_u8((x & mask) >> lshift).unwrap())
                } else {
                    unsafe { T::fetch_update(meta_addr, set_order, fetch_order, f) }
                }
            },
            |_result| {
                #[cfg(feature = "extreme_assertions")]
                if let Ok(old_val) = _result {
                    sanity::verify_update::<T>(self, data_addr, old_val, f(old_val).unwrap())
                }
            },
        )
    }

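    /// Search backwards from `data_addr` for the closest region whose metadata
    /// is non-zero, looking back at most `search_limit_bytes` bytes (including
    /// `data_addr` itself). Returns the region-aligned address of the match, or
    /// `None`. Unsafe: the loads are non-atomic.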
    #[allow(clippy::let_and_return)]
    pub unsafe fn find_prev_non_zero_value<T: MetadataValue>(
        &self,
        data_addr: Address,
        search_limit_bytes: usize,
    ) -> Option<Address> {
        debug_assert!(search_limit_bytes > 0);

        if self.uses_contiguous_side_metadata() {
            let result = self.find_prev_non_zero_value_fast::<T>(data_addr, search_limit_bytes);
            #[cfg(debug_assertions)]
            {
                let result2 =
                    self.find_prev_non_zero_value_simple::<T>(data_addr, search_limit_bytes);
                assert_eq!(result, result2, "find_prev_non_zero_value_fast returned a different result from the naive implementation.");
            }
            result
        } else {
            warn!("We are trying to search for non-zero bits in a discontiguous side metadata. The performance is slow, as MMTk does not optimize for this case.");
            self.find_prev_non_zero_value_simple::<T>(data_addr, search_limit_bytes)
        }
    }

    fn find_prev_non_zero_value_simple<T: MetadataValue>(
        &self,
        data_addr: Address,
        search_limit_bytes: usize,
    ) -> Option<Address> {
        let region_bytes = 1 << self.log_bytes_in_region;
        let start_addr = data_addr.align_down(region_bytes);
        let end_addr = data_addr.saturating_sub(search_limit_bytes) + 1usize;

        let mut cursor = start_addr;
        while cursor >= end_addr {
            if !cursor.is_mapped() {
                return None;
            }
            if !unsafe { self.load::<T>(cursor).is_zero() } {
                return Some(cursor);
            }
            cursor -= region_bytes;
        }
        None
    }

    #[allow(clippy::let_and_return)]
    fn find_prev_non_zero_value_fast<T: MetadataValue>(
        &self,
        data_addr: Address,
        search_limit_bytes: usize,
    ) -> Option<Address> {
        debug_assert!(self.uses_contiguous_side_metadata());

        if !data_addr.is_mapped() {
            return None;
        }
        if !unsafe { self.load::<T>(data_addr).is_zero() } {
            return Some(data_addr.align_down(1 << self.log_bytes_in_region));
        }

        let start_addr = data_addr.saturating_sub(search_limit_bytes) + 1usize;
        let end_addr = data_addr;

        let start_meta_addr = address_to_contiguous_meta_address(self, start_addr);
        let start_meta_shift = meta_byte_lshift(self, start_addr);
        let end_meta_addr = address_to_contiguous_meta_address(self, end_addr);
        let end_meta_shift = meta_byte_lshift(self, end_addr);

        let mut res = None;

        let mut visitor = |range: BitByteRange| {
            match range {
                BitByteRange::Bytes { start, end } => {
                    match helpers::find_last_non_zero_bit_in_metadata_bytes(start, end) {
                        helpers::FindMetaBitResult::Found { addr, bit } => {
                            let (addr, bit) = align_metadata_address(self, addr, bit);
                            res = Some(contiguous_meta_address_to_address(self, addr, bit));
                            true
                        }
                        helpers::FindMetaBitResult::UnmappedMetadata => true,
                        helpers::FindMetaBitResult::NotFound => false,
                    }
                }
                BitByteRange::BitsInByte {
                    addr,
                    bit_start,
                    bit_end,
                } => {
                    match helpers::find_last_non_zero_bit_in_metadata_bits(addr, bit_start, bit_end)
                    {
                        helpers::FindMetaBitResult::Found { addr, bit } => {
                            let (addr, bit) = align_metadata_address(self, addr, bit);
                            res = Some(contiguous_meta_address_to_address(self, addr, bit));
                            true
                        }
                        helpers::FindMetaBitResult::UnmappedMetadata => true,
                        helpers::FindMetaBitResult::NotFound => false,
                    }
                }
            }
        };

        ranges::break_bit_range(
            start_meta_addr,
            start_meta_shift,
            end_meta_addr,
            end_meta_shift,
            false,
            &mut visitor,
        );

        res.map(|addr| addr.align_down(1 << self.log_bytes_in_region))
            .filter(|addr| *addr >= start_addr && *addr < end_addr)
    }

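    /// Visit every region in `[data_start_addr, data_end_addr)` whose metadata
    /// is non-zero, calling `visit_data` with the region's address. A fast
    /// bit-scan is used for contiguous one-bit-per-region specs; other specs
    /// fall back to a region-by-region scan. A minimal usage sketch (names are
    /// placeholders):
    ///
    /// ```ignore
    /// let mut marked = vec![];
    /// spec.scan_non_zero_values::<u8>(start, end, &mut |region| marked.push(region));
    /// ```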
    pub fn scan_non_zero_values<T: MetadataValue>(
        &self,
        data_start_addr: Address,
        data_end_addr: Address,
        visit_data: &mut impl FnMut(Address),
    ) {
        if self.uses_contiguous_side_metadata() && self.log_num_of_bits == 0 {
            self.scan_non_zero_values_fast(data_start_addr, data_end_addr, visit_data);
        } else {
            warn!(
                "We are trying to search for non-zero bits in a discontiguous side metadata \
                 or a metadata with more than one bit per region. \
                 The performance is slow, as MMTk does not optimize for this case."
            );
            self.scan_non_zero_values_simple::<T>(data_start_addr, data_end_addr, visit_data);
        }
    }

    fn scan_non_zero_values_simple<T: MetadataValue>(
        &self,
        data_start_addr: Address,
        data_end_addr: Address,
        visit_data: &mut impl FnMut(Address),
    ) {
        let region_bytes = 1usize << self.log_bytes_in_region;

        let mut cursor = data_start_addr;
        while cursor < data_end_addr {
            debug_assert!(cursor.is_mapped());

            if !unsafe { self.load::<T>(cursor).is_zero() } {
                visit_data(cursor);
            }
            cursor += region_bytes;
        }
    }

    fn scan_non_zero_values_fast(
        &self,
        data_start_addr: Address,
        data_end_addr: Address,
        visit_data: &mut impl FnMut(Address),
    ) {
        debug_assert!(self.uses_contiguous_side_metadata());
        debug_assert_eq!(self.log_num_of_bits, 0);

        let start_meta_addr = address_to_contiguous_meta_address(self, data_start_addr);
        let start_meta_shift = meta_byte_lshift(self, data_start_addr);
        let end_meta_addr = address_to_contiguous_meta_address(self, data_end_addr);
        let end_meta_shift = meta_byte_lshift(self, data_end_addr);

        let mut visitor = |range| {
            match range {
                BitByteRange::Bytes { start, end } => {
                    helpers::scan_non_zero_bits_in_metadata_bytes(start, end, &mut |addr, bit| {
                        visit_data(helpers::contiguous_meta_address_to_address(self, addr, bit));
                    });
                }
                BitByteRange::BitsInByte {
                    addr,
                    bit_start,
                    bit_end,
                } => helpers::scan_non_zero_bits_in_metadata_bits(
                    addr,
                    bit_start,
                    bit_end,
                    &mut |addr, bit| {
                        visit_data(helpers::contiguous_meta_address_to_address(self, addr, bit));
                    },
                ),
            }
            false
        };

        ranges::break_bit_range(
            start_meta_addr,
            start_meta_shift,
            end_meta_addr,
            end_meta_shift,
            true,
            &mut visitor,
        );
    }
}

impl fmt::Debug for SideMetadataSpec {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_fmt(format_args!(
            "SideMetadataSpec {} {{ \
             **is_global: {:?} \
             **offset: 0x{:x} \
             **log_num_of_bits: 0x{:x} \
             **log_bytes_in_region: 0x{:x} \
             }}",
            self.name, self.is_global, self.offset, self.log_num_of_bits, self.log_bytes_in_region
        ))
    }
}

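/// The word-aligned offset immediately after `spec`, which is where the next
/// spec in the same group can start. A minimal sketch of chaining two specs
/// (`FIRST_SPEC` is a placeholder, not a real constant):
///
/// ```ignore
/// const SECOND_OFFSET: usize = side_metadata_offset_after(&FIRST_SPEC);
/// ```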
pub const fn side_metadata_offset_after(spec: &SideMetadataSpec) -> usize {
    raw_align_up(spec.upper_bound_offset(), BYTES_IN_WORD)
}

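/// The set of side metadata specs in use: `global` specs are shared across the
/// whole heap, while `local` specs belong to a particular policy.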
pub(crate) struct SideMetadataContext {
    pub global: Vec<SideMetadataSpec>,
    pub local: Vec<SideMetadataSpec>,
}

impl SideMetadataContext {
    #[allow(clippy::vec_init_then_push)]
    pub fn new_global_specs(specs: &[SideMetadataSpec]) -> Vec<SideMetadataSpec> {
        let mut ret = vec![];

        #[cfg(feature = "vo_bit")]
        ret.push(VO_BIT_SIDE_METADATA_SPEC);

        if let Some(spec) = crate::mmtk::SFT_MAP.get_side_metadata() {
            if spec.is_global {
                ret.push(*spec);
            }
        }

        ret.push(crate::util::heap::chunk_map::ChunkMap::ALLOC_TABLE);

        ret.extend_from_slice(specs);
        ret
    }

    pub fn get_local_specs(&self) -> &[SideMetadataSpec] {
        &self.local
    }

    #[cfg(debug_assertions)]
    pub fn assert_metadata_ranges_in_reserved_range(
        &self,
        start: Address,
        size: usize,
        space_name: &str,
    ) {
        let reserved = {
            let base = super::layout::global_side_metadata_base_address();
            let bytes = super::layout::side_metadata_reserved_bytes();
            base..(base + bytes)
        };
        let check_spec = |spec: &SideMetadataSpec| {
            if !spec.uses_contiguous_side_metadata() {
                return;
            }
            let metadata_start = address_to_meta_address(spec, start);
            let mmap_start = metadata_start.align_down(BYTES_IN_PAGE);
            let metadata_size = data_to_meta_size_round_up(spec, size);
            let mmap_end = (metadata_start + metadata_size).align_up(BYTES_IN_PAGE);
            debug_assert!(
                mmap_start >= reserved.start && mmap_end <= reserved.end,
                "Side metadata range for spec {} in space {} is outside reserved range: [{}, {}) vs [{}, {})",
                spec.name,
                space_name,
                mmap_start,
                mmap_end,
                reserved.start,
                reserved.end
            );
        };
        self.global.iter().for_each(check_spec);
        self.local.iter().for_each(check_spec);
    }

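    /// The number of metadata pages reserved for `data_pages` pages of data,
    /// summed over every global and local spec in this context.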
    pub fn calculate_reserved_pages(&self, data_pages: usize) -> usize {
        let mut total = 0;
        for spec in self.global.iter() {
            total += data_to_meta_size_round_up(spec, data_pages);
        }
        for spec in self.local.iter() {
            total += data_to_meta_size_round_up(spec, data_pages);
        }
        total
    }

    pub fn try_map_metadata_space(
        &self,
        start: Address,
        size: usize,
        space_name: &str,
    ) -> MmapResult<()> {
        debug!(
            "try_map_metadata_space({}, 0x{:x}, {}, {})",
            start,
            size,
            self.global.len(),
            self.local.len()
        );
        debug_assert!(start.is_aligned_to(BYTES_IN_PAGE));
        debug_assert!(size % BYTES_IN_PAGE == 0);
        self.map_metadata_internal(start, size, false, space_name)
    }

    pub fn try_map_metadata_address_range(
        &self,
        start: Address,
        size: usize,
        name: &str,
    ) -> MmapResult<()> {
        debug!(
            "try_map_metadata_address_range({}, 0x{:x}, {}, {})",
            start,
            size,
            self.global.len(),
            self.local.len()
        );
        debug_assert!(start.is_aligned_to(BYTES_IN_CHUNK));
        debug_assert!(size % BYTES_IN_CHUNK == 0);
        self.map_metadata_internal(start, size, true, name)
    }

    fn map_metadata_internal(
        &self,
        start: Address,
        size: usize,
        no_reserve: bool,
        space_name: &str,
    ) -> MmapResult<()> {
        for spec in self.global.iter() {
            let anno = MmapAnnotation::SideMeta {
                space: space_name,
                meta: spec.name,
            };
            try_mmap_contiguous_metadata_space(start, size, spec, no_reserve, &anno)?;
        }

        #[cfg(target_pointer_width = "32")]
        let mut lsize: usize = 0;

        for spec in self.local.iter() {
            #[cfg(target_pointer_width = "64")]
            {
                let anno = MmapAnnotation::SideMeta {
                    space: space_name,
                    meta: spec.name,
                };
                try_mmap_contiguous_metadata_space(start, size, spec, no_reserve, &anno)?;
            }
            #[cfg(target_pointer_width = "32")]
            {
                lsize += metadata_bytes_per_chunk(spec.log_bytes_in_region, spec.log_num_of_bits);
            }
        }

        #[cfg(target_pointer_width = "32")]
        if lsize > 0 {
            let max = BYTES_IN_CHUNK >> super::layout::LOG_LOCAL_SIDE_METADATA_WORST_CASE_RATIO;
            debug_assert!(
                lsize <= max,
                "local side metadata per chunk (0x{:x}) must be less than (0x{:x})",
                lsize,
                max
            );
            let anno = MmapAnnotation::SideMeta {
                space: space_name,
                meta: "all",
            };
            try_map_per_chunk_metadata_space(start, size, lsize, no_reserve, &anno)?;
        }

        Ok(())
    }

    #[cfg(test)]
    pub fn ensure_unmap_metadata_space(&self, start: Address, size: usize) {
        trace!("ensure_unmap_metadata_space({}, 0x{:x})", start, size);
        debug_assert!(start.is_aligned_to(BYTES_IN_PAGE));
        debug_assert!(size % BYTES_IN_PAGE == 0);

        for spec in self.global.iter() {
            ensure_munmap_contiguous_metadata_space(start, size, spec);
        }

        for spec in self.local.iter() {
            #[cfg(target_pointer_width = "64")]
            {
                ensure_munmap_contiguous_metadata_space(start, size, spec);
            }
            #[cfg(target_pointer_width = "32")]
            {
                ensure_munmap_chunked_metadata_space(start, size, spec);
            }
        }
    }
}

pub struct MetadataByteArrayRef<const ENTRIES: usize> {
    #[cfg(feature = "extreme_assertions")]
    heap_range_start: Address,
    #[cfg(feature = "extreme_assertions")]
    spec: SideMetadataSpec,
    data: &'static [u8; ENTRIES],
}

impl<const ENTRIES: usize> MetadataByteArrayRef<ENTRIES> {
    pub fn new(metadata_spec: &SideMetadataSpec, start: Address, bytes: usize) -> Self {
        debug_assert_eq!(
            metadata_spec.log_num_of_bits, LOG_BITS_IN_BYTE as usize,
            "Each heap entry should map to a byte in side-metadata"
        );
        debug_assert_eq!(
            bytes >> metadata_spec.log_bytes_in_region,
            ENTRIES,
            "Heap range size and MetadataByteArray size do not match"
        );
        Self {
            #[cfg(feature = "extreme_assertions")]
            heap_range_start: start,
            #[cfg(feature = "extreme_assertions")]
            spec: *metadata_spec,
            data: unsafe { &*address_to_meta_address(metadata_spec, start).to_ptr() },
        }
    }

    #[allow(clippy::len_without_is_empty)]
    pub const fn len(&self) -> usize {
        ENTRIES
    }

    #[allow(clippy::let_and_return)]
    pub fn get(&self, index: usize) -> u8 {
        #[cfg(feature = "extreme_assertions")]
        let _lock = sanity::SANITY_LOCK.lock().unwrap();
        let value = self.data[index];
        #[cfg(feature = "extreme_assertions")]
        {
            let data_addr = self.heap_range_start + (index << self.spec.log_bytes_in_region);
            sanity::verify_load::<u8>(&self.spec, data_addr, value);
        }
        value
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::mmap_anno_test;
    use crate::util::metadata::side_metadata::SideMetadataContext;

    pub const ZERO_OFFSET: usize = 0;

    #[test]
    fn calculate_reserved_pages_one_spec() {
        let spec = SideMetadataSpec {
            name: "test_spec",
            is_global: true,
            offset: ZERO_OFFSET,
            log_num_of_bits: 0,
            log_bytes_in_region: 3,
        };
        let side_metadata = SideMetadataContext {
            global: vec![spec],
            local: vec![],
        };
        assert_eq!(side_metadata.calculate_reserved_pages(0), 0);
        assert_eq!(side_metadata.calculate_reserved_pages(63), 1);
        assert_eq!(side_metadata.calculate_reserved_pages(64), 1);
        assert_eq!(side_metadata.calculate_reserved_pages(65), 2);
        assert_eq!(side_metadata.calculate_reserved_pages(1024), 16);
    }

    #[test]
    fn calculate_reserved_pages_multi_specs() {
        let gspec = SideMetadataSpec {
            name: "gspec",
            is_global: true,
            offset: ZERO_OFFSET,
            log_num_of_bits: 0,
            log_bytes_in_region: 3,
        };
        let lspec = SideMetadataSpec {
            name: "lspec",
            is_global: false,
            offset: ZERO_OFFSET,
            log_num_of_bits: 1,
            log_bytes_in_region: 12,
        };
        let side_metadata = SideMetadataContext {
            global: vec![gspec],
            local: vec![lspec],
        };
        assert_eq!(side_metadata.calculate_reserved_pages(1024), 16 + 1);
    }

    use crate::util::heap::layout::vm_layout;
    use crate::util::test_util::{serial_test, with_cleanup};
    use paste::paste;

    const TEST_LOG_BYTES_IN_REGION: usize = 12;

    fn test_side_metadata(
        log_bits: usize,
        f: impl Fn(&SideMetadataSpec, Address, Address) + std::panic::RefUnwindSafe,
    ) {
        serial_test(|| {
            core_test_initialize_side_metadata();

            let spec = SideMetadataSpec {
                name: "Test Spec $tname",
                is_global: true,
                offset: 0,
                log_num_of_bits: log_bits,
                log_bytes_in_region: TEST_LOG_BYTES_IN_REGION,
            };
            let context = SideMetadataContext {
                global: vec![spec],
                local: vec![],
            };
            let mut sanity = SideMetadataSanity::new();
            sanity.verify_metadata_context("TestPolicy", &context);

            let data_addr = vm_layout::vm_layout().heap_start;
            crate::MMAPPER
                .ensure_mapped(
                    data_addr,
                    1,
                    HugePageSupport::No,
                    MmapProtection::ReadWrite,
                    mmap_anno_test!(),
                )
                .unwrap();
            let meta_addr = address_to_meta_address(&spec, data_addr);
            with_cleanup(
                || {
                    let mmap_result =
                        context.try_map_metadata_space(data_addr, BYTES_IN_PAGE, "test_space");
                    assert!(mmap_result.is_ok(), "{:?}", mmap_result);

                    f(&spec, data_addr, meta_addr);
                },
                || {
                    assert!(log_bits <= 6);
                    let meta_ptr: *mut u64 = meta_addr.to_mut_ptr();
                    unsafe { *meta_ptr = 0 };

                    sanity::reset();
                },
            )
        })
    }

    fn max_value(log_bits: usize) -> u64 {
        (0..(1 << log_bits)).fold(0, |accum, x| accum + (1 << x))
    }
    #[test]
    fn test_max_value() {
        assert_eq!(max_value(0), 1);
        assert_eq!(max_value(1), 0b11);
        assert_eq!(max_value(2), 0b1111);
        assert_eq!(max_value(3), 255);
        assert_eq!(max_value(4), 65535);
    }

    macro_rules! test_side_metadata_access {
        ($tname: ident, $type: ty, $log_bits: expr) => {
            paste!{
                #[test]
                fn [<$tname _load>]() {
                    test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
                        let meta_ptr: *mut $type = meta_addr.to_mut_ptr();

                        assert_eq!(unsafe { spec.load::<$type>(data_addr) }, 0);
                        assert_eq!(spec.load_atomic::<$type>(data_addr, Ordering::SeqCst), 0);

                        let max_value: $type = max_value($log_bits) as _;
                        unsafe { spec.store::<$type>(data_addr, max_value); }
                        assert_eq!(unsafe { spec.load::<$type>(data_addr) }, max_value);
                        assert_eq!(spec.load_atomic::<$type>(data_addr, Ordering::SeqCst), max_value);
                        assert_eq!(unsafe { *meta_ptr }, max_value);
                    });
                }

                #[test]
                fn [<$tname _store>]() {
                    test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
                        let meta_ptr: *mut $type = meta_addr.to_mut_ptr();
                        let max_value: $type = max_value($log_bits) as _;

                        unsafe { *meta_ptr = <$type>::MAX; }
                        unsafe { spec.store::<$type>(data_addr, 0); }
                        assert_eq!(unsafe { spec.load::<$type>(data_addr) }, 0);
                        assert_eq!(unsafe { *meta_ptr }, <$type>::MAX & (!max_value));
                    });
                }

                #[test]
                fn [<$tname _atomic_store>]() {
                    test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
                        let meta_ptr: *mut $type = meta_addr.to_mut_ptr();
                        let max_value: $type = max_value($log_bits) as _;

                        unsafe { *meta_ptr = <$type>::MAX; }
                        spec.store_atomic::<$type>(data_addr, 0, Ordering::SeqCst);
                        assert_eq!(unsafe { spec.load::<$type>(data_addr) }, 0);
                        assert_eq!(unsafe { *meta_ptr }, <$type>::MAX & (!max_value));
                    });
                }

                #[test]
                fn [<$tname _compare_exchange_success>]() {
                    test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
                        let meta_ptr: *mut $type = meta_addr.to_mut_ptr();
                        let max_value: $type = max_value($log_bits) as _;
                        unsafe { *meta_ptr = <$type>::MAX; }
                        spec.store_atomic::<$type>(data_addr, 1, Ordering::SeqCst);

                        let old_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
                        assert_eq!(old_val, 1);

                        let new_val = 0;
                        let res = spec.compare_exchange_atomic::<$type>(data_addr, old_val, new_val, Ordering::SeqCst, Ordering::SeqCst);
                        assert!(res.is_ok());
                        assert_eq!(res.unwrap(), old_val, "old vals do not match");

                        let after_update = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
                        assert_eq!(after_update, new_val);
                        assert_eq!(unsafe { *meta_ptr }, <$type>::MAX & (!max_value));
                    });
                }

                #[test]
                fn [<$tname _compare_exchange_fail>]() {
                    test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
                        let meta_ptr: *mut $type = meta_addr.to_mut_ptr();
                        unsafe { *meta_ptr = <$type>::MAX; }
                        spec.store_atomic::<$type>(data_addr, 1, Ordering::SeqCst);

                        let old_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
                        assert_eq!(old_val, 1);

                        spec.store_atomic::<$type>(data_addr, 0, Ordering::SeqCst);
                        let bits_before_cas = unsafe { *meta_ptr };

                        let new_val = 0;
                        let res = spec.compare_exchange_atomic::<$type>(data_addr, old_val, new_val, Ordering::SeqCst, Ordering::SeqCst);
                        assert!(res.is_err());
                        assert_eq!(res.err().unwrap(), 0);
                        let bits_after_cas = unsafe { *meta_ptr };
                        assert_eq!(bits_before_cas, bits_after_cas);
                    });
                }

                #[test]
                fn [<$tname _fetch_add_1>]() {
                    test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
                        let meta_ptr: *mut $type = meta_addr.to_mut_ptr();
                        unsafe { *meta_ptr = <$type>::MAX; }
                        spec.store_atomic::<$type>(data_addr, 0, Ordering::SeqCst);

                        let old_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);

                        let old_val_from_fetch = spec.fetch_add_atomic::<$type>(data_addr, 1, Ordering::SeqCst);
                        assert_eq!(old_val_from_fetch, old_val);

                        let new_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
                        assert_eq!(new_val, 1);
                    });
                }

                #[test]
                fn [<$tname _fetch_add_max>]() {
                    test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
                        let meta_ptr: *mut $type = meta_addr.to_mut_ptr();
                        let max_value: $type = max_value($log_bits) as _;
                        unsafe { *meta_ptr = <$type>::MAX; }
                        spec.store_atomic::<$type>(data_addr, 0, Ordering::SeqCst);

                        let old_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);

                        let old_val_from_fetch = spec.fetch_add_atomic::<$type>(data_addr, max_value, Ordering::SeqCst);
                        assert_eq!(old_val_from_fetch, old_val);

                        let new_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
                        assert_eq!(new_val, max_value);
                    });
                }

                #[test]
                fn [<$tname _fetch_add_overflow>]() {
                    test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
                        let meta_ptr: *mut $type = meta_addr.to_mut_ptr();
                        let max_value: $type = max_value($log_bits) as _;
                        unsafe { *meta_ptr = <$type>::MAX; }
                        spec.store_atomic::<$type>(data_addr, max_value, Ordering::SeqCst);

                        let old_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);

                        let old_val_from_fetch = spec.fetch_add_atomic::<$type>(data_addr, 1, Ordering::SeqCst);
                        assert_eq!(old_val_from_fetch, old_val);

                        let new_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
                        assert_eq!(new_val, 0);
                    });
                }

                #[test]
                fn [<$tname _fetch_sub_1>]() {
                    test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
                        let meta_ptr: *mut $type = meta_addr.to_mut_ptr();
                        unsafe { *meta_ptr = <$type>::MAX; }
                        spec.store_atomic::<$type>(data_addr, 1, Ordering::SeqCst);

                        let old_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);

                        let old_val_from_fetch = spec.fetch_sub_atomic::<$type>(data_addr, 1, Ordering::SeqCst);
                        assert_eq!(old_val_from_fetch, old_val);

                        let new_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
                        assert_eq!(new_val, 0);
                    });
                }

                #[test]
                fn [<$tname _fetch_sub_max>]() {
                    test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
                        let meta_ptr: *mut $type = meta_addr.to_mut_ptr();
                        let max_value: $type = max_value($log_bits) as _;
                        unsafe { *meta_ptr = <$type>::MAX; }
                        spec.store_atomic::<$type>(data_addr, max_value, Ordering::SeqCst);

                        let old_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);

                        let old_val_from_fetch = spec.fetch_sub_atomic::<$type>(data_addr, max_value, Ordering::SeqCst);
                        assert_eq!(old_val_from_fetch, old_val);

                        let new_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
                        assert_eq!(new_val, 0);
                    });
                }

                #[test]
                fn [<$tname _fetch_sub_overflow>]() {
                    test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
                        let meta_ptr: *mut $type = meta_addr.to_mut_ptr();
                        let max_value: $type = max_value($log_bits) as _;
                        unsafe { *meta_ptr = <$type>::MAX; }
                        spec.store_atomic::<$type>(data_addr, 0, Ordering::SeqCst);

                        let old_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);

                        let old_val_from_fetch = spec.fetch_sub_atomic::<$type>(data_addr, 1, Ordering::SeqCst);
                        assert_eq!(old_val_from_fetch, old_val);

                        let new_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
                        assert_eq!(new_val, max_value);
                    });
                }

                #[test]
                fn [<$tname _fetch_and>]() {
                    test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
                        let meta_ptr: *mut $type = meta_addr.to_mut_ptr();
                        let max_value: $type = max_value($log_bits) as _;
                        unsafe { *meta_ptr = <$type>::MAX; }
                        spec.store_atomic::<$type>(data_addr, max_value, Ordering::SeqCst);

                        let old_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
                        let old_val_from_fetch = spec.fetch_and_atomic::<$type>(data_addr, max_value, Ordering::SeqCst);
                        assert_eq!(old_val_from_fetch, old_val, "old values do not match");
                        assert_eq!(spec.load_atomic::<$type>(data_addr, Ordering::SeqCst), max_value, "load values do not match");
                        assert_eq!(unsafe { *meta_ptr }, <$type>::MAX, "raw values do not match");

                        let last_bit_zero = max_value - 1;
                        let old_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
                        let old_val_from_fetch = spec.fetch_and_atomic::<$type>(data_addr, last_bit_zero, Ordering::SeqCst);
                        assert_eq!(old_val_from_fetch, old_val);
                        assert_eq!(spec.load_atomic::<$type>(data_addr, Ordering::SeqCst), last_bit_zero);
                        assert_eq!(unsafe { *meta_ptr }, <$type>::MAX - 1);
                    });
                }

                #[test]
                fn [<$tname _fetch_or>]() {
                    test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
                        let meta_ptr: *mut $type = meta_addr.to_mut_ptr();
                        let max_value: $type = max_value($log_bits) as _;
                        unsafe { *meta_ptr = 0; }
                        spec.store_atomic::<$type>(data_addr, 0, Ordering::SeqCst);

                        let old_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
                        let old_val_from_fetch = spec.fetch_or_atomic::<$type>(data_addr, 0, Ordering::SeqCst);
                        assert_eq!(old_val_from_fetch, old_val);
                        assert_eq!(spec.load_atomic::<$type>(data_addr, Ordering::SeqCst), 0);
                        assert_eq!(unsafe { *meta_ptr }, 0);

                        let old_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
                        let old_val_from_fetch = spec.fetch_or_atomic::<$type>(data_addr, max_value, Ordering::SeqCst);
                        assert_eq!(old_val_from_fetch, old_val);
                        assert_eq!(spec.load_atomic::<$type>(data_addr, Ordering::SeqCst), max_value);
                        assert_eq!(unsafe { *meta_ptr }, max_value);
                    });
                }

                #[test]
                fn [<$tname _fetch_update_success>]() {
                    test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
                        let meta_ptr: *mut $type = meta_addr.to_mut_ptr();
                        let max_value: $type = max_value($log_bits) as _;
                        unsafe { *meta_ptr = <$type>::MAX; }
                        spec.store_atomic::<$type>(data_addr, max_value, Ordering::SeqCst);

                        let old_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
                        let fetch_res = spec.fetch_update_atomic::<$type, _>(data_addr, Ordering::SeqCst, Ordering::SeqCst, |_x: $type| Some(0));
                        assert!(fetch_res.is_ok());
                        assert_eq!(fetch_res.unwrap(), old_val);
                        assert_eq!(spec.load_atomic::<$type>(data_addr, Ordering::SeqCst), 0);
                        assert_eq!(unsafe { *meta_ptr }, <$type>::MAX & (!max_value));
                    });
                }

                #[test]
                fn [<$tname _fetch_update_fail>]() {
                    test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
                        let meta_ptr: *mut $type = meta_addr.to_mut_ptr();
                        let max_value: $type = max_value($log_bits) as _;
                        unsafe { *meta_ptr = <$type>::MAX; }
                        spec.store_atomic::<$type>(data_addr, max_value, Ordering::SeqCst);

                        let old_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
                        let fetch_res = spec.fetch_update_atomic::<$type, _>(data_addr, Ordering::SeqCst, Ordering::SeqCst, |_x: $type| None);
                        assert!(fetch_res.is_err());
                        assert_eq!(fetch_res.err().unwrap(), old_val);
                        assert_eq!(spec.load_atomic::<$type>(data_addr, Ordering::SeqCst), max_value);
                        assert_eq!(unsafe { *meta_ptr }, <$type>::MAX);
                    });
                }

                #[test]
                fn [<$tname _find_prev_non_zero_value_easy>]() {
                    test_side_metadata($log_bits, |spec, data_addr, _meta_addr| {
                        let max_value: $type = max_value($log_bits) as _;
                        spec.store_atomic::<$type>(data_addr, max_value, Ordering::SeqCst);

                        let res_addr = unsafe { spec.find_prev_non_zero_value::<$type>(data_addr, 8) };
                        assert!(res_addr.is_some());
                        assert_eq!(res_addr.unwrap(), data_addr);
                    });
                }

                #[test]
                fn [<$tname _find_prev_non_zero_value_arbitrary_bytes>]() {
                    test_side_metadata($log_bits, |spec, data_addr, _meta_addr| {
                        let max_value: $type = max_value($log_bits) as _;
                        spec.store_atomic::<$type>(data_addr, max_value, Ordering::SeqCst);

                        let test_region = (1 << TEST_LOG_BYTES_IN_REGION);
                        for len in 1..(test_region*4) {
                            let start_addr = data_addr + len;
                            let res_addr = unsafe { spec.find_prev_non_zero_value::<$type>(start_addr, len + 1) };
                            assert!(res_addr.is_some());
                            assert_eq!(res_addr.unwrap(), data_addr);
                        }
                    });
                }

                #[test]
                fn [<$tname _find_prev_non_zero_value_arbitrary_start>]() {
                    test_side_metadata($log_bits, |spec, data_addr, _meta_addr| {
                        let max_value: $type = max_value($log_bits) as _;

                        for offset in 0..7usize {
                            let test_data_addr = data_addr + offset;
                            spec.store_atomic::<$type>(test_data_addr, max_value, Ordering::SeqCst);

                            let res_addr = unsafe { spec.find_prev_non_zero_value::<$type>(test_data_addr, 4096) };
                            assert!(res_addr.is_some());
                            assert_eq!(res_addr.unwrap(), data_addr);

                            spec.store_atomic::<$type>(test_data_addr, 0, Ordering::SeqCst);
                        }
                    });
                }

                #[test]
                fn [<$tname _find_prev_non_zero_value_no_find>]() {
                    test_side_metadata($log_bits, |spec, data_addr, _meta_addr| {
                        spec.store_atomic::<$type>(data_addr, 0, Ordering::SeqCst);

                        let test_region = (1 << TEST_LOG_BYTES_IN_REGION);
                        for len in 1..(test_region*4) {
                            let start_addr = data_addr + len;
                            let res_addr = unsafe { spec.find_prev_non_zero_value::<$type>(start_addr, len + 1) };
                            assert!(res_addr.is_none());
                        }
                    });
                }
            }
        }
    }

    test_side_metadata_access!(test_u1, u8, 0);
    test_side_metadata_access!(test_u2, u8, 1);
    test_side_metadata_access!(test_u4, u8, 2);
    test_side_metadata_access!(test_u8, u8, 3);
    test_side_metadata_access!(test_u16, u16, 4);
    test_side_metadata_access!(test_u32, u32, 5);
    test_side_metadata_access!(test_u64, u64, 6);
    test_side_metadata_access!(
        test_usize,
        usize,
        if cfg!(target_pointer_width = "64") {
            6
        } else if cfg!(target_pointer_width = "32") {
            5
        } else {
            unreachable!()
        }
    );

    #[test]
    fn test_bulk_update_meta_bits() {
        let raw_mem =
            unsafe { std::alloc::alloc_zeroed(std::alloc::Layout::from_size_align(8, 8).unwrap()) };
        let addr = Address::from_mut_ptr(raw_mem);

        SideMetadataSpec::set_meta_bits(addr, 0, addr, 4);
        assert_eq!(unsafe { addr.load::<u64>() }, 0b1111);

        SideMetadataSpec::zero_meta_bits(addr, 1, addr, 3);
        assert_eq!(unsafe { addr.load::<u64>() }, 0b1001);

        SideMetadataSpec::set_meta_bits(addr, 2, addr, 6);
        assert_eq!(unsafe { addr.load::<u64>() }, 0b0011_1101);

        SideMetadataSpec::zero_meta_bits(addr, 0, addr + 1usize, 0);
        assert_eq!(unsafe { addr.load::<u64>() }, 0b0);

        SideMetadataSpec::set_meta_bits(addr, 2, addr + 1usize, 2);
        assert_eq!(unsafe { addr.load::<u64>() }, 0b11_1111_1100);

        SideMetadataSpec::set_meta_bits(addr, 0, addr + 1usize, 2);
        assert_eq!(unsafe { addr.load::<u64>() }, 0b11_1111_1111);
    }
}