use super::*;
use crate::util::constants::{BYTES_IN_PAGE, BYTES_IN_WORD, LOG_BITS_IN_BYTE};
use crate::util::conversions::raw_align_up;
use crate::util::heap::layout::vm_layout::BYTES_IN_CHUNK;
use crate::util::memory::{self, MmapAnnotation};
use crate::util::metadata::metadata_val_traits::*;
#[cfg(feature = "vo_bit")]
use crate::util::metadata::vo_bit::VO_BIT_SIDE_METADATA_SPEC;
use crate::util::Address;
use num_traits::FromPrimitive;
use ranges::BitByteRange;
use std::fmt;
use std::io::Result;
use std::sync::atomic::{AtomicU8, Ordering};

/// The specification of a piece of side metadata. It is used as the input to the
/// side metadata access functions below.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
pub struct SideMetadataSpec {
    /// The name of this side metadata, used for debugging and mmap annotations.
    pub name: &'static str,
    /// Is this metadata global (shared by all policies) or local to a specific policy?
    pub is_global: bool,
    /// The offset of this metadata: an absolute address for contiguous side metadata,
    /// or a per-chunk relative offset for chunked side metadata (local metadata on 32-bit targets).
    pub offset: SideMetadataOffset,
    /// Number of bits per region, as log2.
    pub log_num_of_bits: usize,
    /// Number of bytes per data region, as log2.
    pub log_bytes_in_region: usize,
}

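// Illustrative sketch only (not part of the original source): a spec is normally declared
// as a constant, e.g. a hypothetical global 1-bit-per-8-bytes metadata, mirroring the specs
// constructed in the tests at the bottom of this file:
//
//     const MY_METADATA: SideMetadataSpec = SideMetadataSpec {
//         name: "MyMetadata",
//         is_global: true,
//         offset: SideMetadataOffset::addr(GLOBAL_SIDE_METADATA_BASE_ADDRESS),
//         log_num_of_bits: 0,       // 1 bit per region
//         log_bytes_in_region: 3,   // one region = 8 bytes
//     };
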
impl SideMetadataSpec {
    pub const fn uses_contiguous_side_metadata(&self) -> bool {
        self.is_global || cfg!(target_pointer_width = "64")
    }

    pub const fn is_absolute_offset(&self) -> bool {
        self.uses_contiguous_side_metadata()
    }

    pub const fn is_rel_offset(&self) -> bool {
        !self.is_absolute_offset()
    }

    pub const fn get_absolute_offset(&self) -> Address {
        debug_assert!(self.is_absolute_offset());
        unsafe { self.offset.addr }
    }

    pub const fn get_rel_offset(&self) -> usize {
        debug_assert!(self.is_rel_offset());
        unsafe { self.offset.rel_offset }
    }

    #[cfg(target_pointer_width = "64")]
    pub const fn upper_bound_offset(&self) -> SideMetadataOffset {
        debug_assert!(self.is_absolute_offset());
        SideMetadataOffset {
            addr: unsafe { self.offset.addr }
                .add(crate::util::metadata::side_metadata::metadata_address_range_size(self)),
        }
    }

    #[cfg(target_pointer_width = "32")]
    pub const fn upper_bound_offset(&self) -> SideMetadataOffset {
        if self.is_absolute_offset() {
            SideMetadataOffset {
                addr: unsafe { self.offset.addr }
                    .add(crate::util::metadata::side_metadata::metadata_address_range_size(self)),
            }
        } else {
            SideMetadataOffset {
                rel_offset: unsafe { self.offset.rel_offset }
                    + crate::util::metadata::side_metadata::metadata_bytes_per_chunk(
                        self.log_bytes_in_region,
                        self.log_num_of_bits,
                    ),
            }
        }
    }

    pub const fn upper_bound_address_for_contiguous(&self) -> Address {
        debug_assert!(self.is_absolute_offset());
        unsafe { self.upper_bound_offset().addr }
    }

    #[cfg(target_pointer_width = "32")]
    pub const fn upper_bound_address_for_chunked(&self, data_addr: Address) -> Address {
        debug_assert!(self.is_rel_offset());
        address_to_meta_chunk_addr(data_addr).add(unsafe { self.upper_bound_offset().rel_offset })
    }

    #[cfg(debug_assertions)]
    pub(crate) fn assert_metadata_mapped(&self, data_addr: Address) {
        let meta_start = address_to_meta_address(self, data_addr).align_down(BYTES_IN_PAGE);

        trace!(
            "ensure_metadata_is_mapped({}).meta_start({})",
            data_addr,
            meta_start
        );

        memory::panic_if_unmapped(
            meta_start,
            BYTES_IN_PAGE,
            &MmapAnnotation::Misc {
                name: "assert_metadata_mapped",
            },
        );
    }

    #[cfg(debug_assertions)]
    pub(crate) fn are_different_metadata_bits(&self, addr1: Address, addr2: Address) -> bool {
        let a1 = address_to_meta_address(self, addr1);
        let a2 = address_to_meta_address(self, addr2);
        let s1 = meta_byte_lshift(self, addr1);
        let s2 = meta_byte_lshift(self, addr2);
        (a1, s1) != (a2, s2)
    }

    #[cfg(debug_assertions)]
    fn assert_value_type<T: MetadataValue>(&self, val: Option<T>) {
        let log_b = self.log_num_of_bits;
        match log_b {
            _ if log_b < 3 => {
                assert_eq!(T::LOG2, 3);
                if let Some(v) = val {
                    assert!(
                        v.to_u8().unwrap() < (1 << (1 << log_b)),
                        "Input value {:?} is invalid for the spec {:?}",
                        v,
                        self
                    );
                }
            }
            3..=6 => assert_eq!(T::LOG2, log_b as u32),
            _ => unreachable!("side metadata > {}-bits is not supported", 1 << log_b),
        }
    }

    /// Check if the metadata for the given data address is mapped.
    pub(crate) fn is_mapped(&self, data_addr: Address) -> bool {
        use crate::MMAPPER;
        let meta_addr = address_to_meta_address(self, data_addr);
        MMAPPER.is_mapped_address(meta_addr)
    }

    /// Zero the metadata bits from (meta_start_addr, meta_start_bit) to
    /// (meta_end_addr, meta_end_bit), end exclusive.
    pub(crate) fn zero_meta_bits(
        meta_start_addr: Address,
        meta_start_bit: u8,
        meta_end_addr: Address,
        meta_end_bit: u8,
    ) {
        let mut visitor = |range| {
            match range {
                BitByteRange::Bytes { start, end } => {
                    memory::zero(start, end - start);
                    false
                }
                BitByteRange::BitsInByte {
                    addr,
                    bit_start,
                    bit_end,
                } => {
                    // Clear the bits in [bit_start, bit_end) and keep the rest of the byte.
                    // The mask has zeros for the bits to clear and ones elsewhere.
                    let mask: u8 =
                        u8::MAX.checked_shl(bit_end as u32).unwrap_or(0) | !(u8::MAX << bit_start);
                    unsafe { addr.as_ref::<AtomicU8>() }.fetch_and(mask, Ordering::SeqCst);
                    false
                }
            }
        };
        ranges::break_bit_range(
            meta_start_addr,
            meta_start_bit,
            meta_end_addr,
            meta_end_bit,
            true,
            &mut visitor,
        );
    }

    /// Set the metadata bits from (meta_start_addr, meta_start_bit) to
    /// (meta_end_addr, meta_end_bit), end exclusive.
    pub(crate) fn set_meta_bits(
        meta_start_addr: Address,
        meta_start_bit: u8,
        meta_end_addr: Address,
        meta_end_bit: u8,
    ) {
        let mut visitor = |range| {
            match range {
                BitByteRange::Bytes { start, end } => {
                    memory::set(start, 0xff, end - start);
                    false
                }
                BitByteRange::BitsInByte {
                    addr,
                    bit_start,
                    bit_end,
                } => {
                    // Set the bits in [bit_start, bit_end) and keep the rest of the byte.
                    // The mask has ones for the bits to set and zeros elsewhere.
                    let mask: u8 = !(u8::MAX.checked_shl(bit_end as u32).unwrap_or(0))
                        & (u8::MAX << bit_start);
                    unsafe { addr.as_ref::<AtomicU8>() }.fetch_or(mask, Ordering::SeqCst);
                    false
                }
            }
        };
        ranges::break_bit_range(
            meta_start_addr,
            meta_start_bit,
            meta_end_addr,
            meta_end_bit,
            true,
            &mut visitor,
        );
    }

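    /// Apply `update_meta_bits` to the side metadata that covers the data range
    /// `[start, start + size)`. On 64-bit targets and for global specs the metadata is
    /// contiguous and is updated in one go; for chunked (32-bit local) specs the range is
    /// split at chunk boundaries and each chunk's metadata is updated separately.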
    pub(super) fn bulk_update_metadata(
        &self,
        start: Address,
        size: usize,
        update_meta_bits: &impl Fn(Address, u8, Address, u8),
    ) {
        // Update a data range that maps to contiguous side metadata.
        let update_contiguous = |data_start: Address, data_bytes: usize| {
            if data_bytes == 0 {
                return;
            }
            let meta_start = address_to_meta_address(self, data_start);
            let meta_start_shift = meta_byte_lshift(self, data_start);
            let meta_end = address_to_meta_address(self, data_start + data_bytes);
            let meta_end_shift = meta_byte_lshift(self, data_start + data_bytes);
            update_meta_bits(meta_start, meta_start_shift, meta_end, meta_end_shift);
        };

        // Update a data range within a single chunk for chunked (32-bit local) side metadata.
        #[cfg(target_pointer_width = "32")]
        let update_discontiguous = |data_start: Address, data_bytes: usize| {
            use crate::util::constants::BITS_IN_BYTE;
            if data_bytes == 0 {
                return;
            }
            debug_assert_eq!(
                data_start.align_down(BYTES_IN_CHUNK),
                (data_start + data_bytes - 1).align_down(BYTES_IN_CHUNK),
                "The data to be zeroed in discontiguous specs needs to be in the same chunk"
            );
            let meta_start = address_to_meta_address(self, data_start);
            let meta_start_shift = meta_byte_lshift(self, data_start);
            let meta_total_bits = (data_bytes >> self.log_bytes_in_region) << self.log_num_of_bits;
            let meta_delta_bytes = meta_total_bits >> LOG_BITS_IN_BYTE;
            let meta_delta_bits: u8 = (meta_total_bits % BITS_IN_BYTE) as u8;
            let (meta_end, meta_end_shift) = {
                let mut end_addr = meta_start + meta_delta_bytes;
                let mut end_bit = meta_start_shift + meta_delta_bits;
                if end_bit >= BITS_IN_BYTE as u8 {
                    end_bit -= BITS_IN_BYTE as u8;
                    end_addr += 1usize;
                }
                (end_addr, end_bit)
            };

            update_meta_bits(meta_start, meta_start_shift, meta_end, meta_end_shift);
        };

        if cfg!(target_pointer_width = "64") || self.is_global {
            update_contiguous(start, size);
        }
        #[cfg(target_pointer_width = "32")]
        if !self.is_global {
            // How many chunk boundaries does the data range cross?
            let chunk_num = ((start + size).align_down(BYTES_IN_CHUNK)
                - start.align_down(BYTES_IN_CHUNK))
                / BYTES_IN_CHUNK;
            if chunk_num == 0 {
                update_discontiguous(start, size);
            } else {
                // Update the leading partial chunk.
                let second_data_chunk = start.align_up(BYTES_IN_CHUNK);
                update_discontiguous(start, second_data_chunk - start);
                // Update the trailing partial chunk.
                let last_data_chunk = (start + size).align_down(BYTES_IN_CHUNK);
                update_discontiguous(last_data_chunk, start + size - last_data_chunk);
                let mut next_data_chunk = second_data_chunk;
                // Update the whole chunks in between.
                while next_data_chunk != last_data_chunk {
                    update_discontiguous(next_data_chunk, BYTES_IN_CHUNK);
                    next_data_chunk += BYTES_IN_CHUNK;
                }
            }
        }
    }

    pub fn bzero_metadata(&self, start: Address, size: usize) {
        #[cfg(feature = "extreme_assertions")]
        let _lock = sanity::SANITY_LOCK.lock().unwrap();

        #[cfg(feature = "extreme_assertions")]
        sanity::verify_bzero(self, start, size);

        self.bulk_update_metadata(start, size, &Self::zero_meta_bits)
    }

    pub fn bset_metadata(&self, start: Address, size: usize) {
        #[cfg(feature = "extreme_assertions")]
        let _lock = sanity::SANITY_LOCK.lock().unwrap();

        #[cfg(feature = "extreme_assertions")]
        sanity::verify_bset(self, start, size);

        self.bulk_update_metadata(start, size, &Self::set_meta_bits)
    }

    pub fn bcopy_metadata_contiguous(&self, start: Address, size: usize, other: &SideMetadataSpec) {
        #[cfg(feature = "extreme_assertions")]
        let _lock = sanity::SANITY_LOCK.lock().unwrap();

        #[cfg(feature = "extreme_assertions")]
        sanity::verify_bcopy(self, start, size, other);

        debug_assert_eq!(other.log_bytes_in_region, self.log_bytes_in_region);
        debug_assert_eq!(other.log_num_of_bits, self.log_num_of_bits);

        let dst_meta_start_addr = address_to_meta_address(self, start);
        let dst_meta_start_bit = meta_byte_lshift(self, start);
        let dst_meta_end_addr = address_to_meta_address(self, start + size);
        let dst_meta_end_bit = meta_byte_lshift(self, start + size);

        let src_meta_start_addr = address_to_meta_address(other, start);
        let src_meta_start_bit = meta_byte_lshift(other, start);

        debug_assert_eq!(dst_meta_start_bit, src_meta_start_bit);

        let mut visitor = |range| {
            match range {
                BitByteRange::Bytes {
                    start: dst_start,
                    end: dst_end,
                } => unsafe {
                    let byte_offset = dst_start - dst_meta_start_addr;
                    let src_start = src_meta_start_addr + byte_offset;
                    let size = dst_end - dst_start;
                    std::ptr::copy::<u8>(src_start.to_ptr(), dst_start.to_mut_ptr(), size);
                    false
                },
                BitByteRange::BitsInByte {
                    addr: dst,
                    bit_start,
                    bit_end,
                } => {
                    let byte_offset = dst - dst_meta_start_addr;
                    let src = src_meta_start_addr + byte_offset;
                    let mask: u8 = !(u8::MAX.checked_shl(bit_end as u32).unwrap_or(0))
                        & (u8::MAX << bit_start);
                    let old_src = unsafe { src.as_ref::<AtomicU8>() }.load(Ordering::Relaxed);
                    let old_dst = unsafe { dst.as_ref::<AtomicU8>() }.load(Ordering::Relaxed);
                    let new = (old_src & mask) | (old_dst & !mask);
                    unsafe { dst.as_ref::<AtomicU8>() }.store(new, Ordering::Relaxed);
                    false
                }
            }
        };

        ranges::break_bit_range(
            dst_meta_start_addr,
            dst_meta_start_bit,
            dst_meta_end_addr,
            dst_meta_end_bit,
            true,
            &mut visitor,
        );
    }

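    /// The common path for all side metadata accesses: take the sanity-checking lock when
    /// `extreme_assertions` is enabled, optionally check the input value against the spec,
    /// run `access_func`, then (if `CHECK_VALUE`) pass the result to `verify_func` so the
    /// sanity checker can record the access.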
    #[allow(unused_variables)]
    fn side_metadata_access<
        const CHECK_VALUE: bool,
        T: MetadataValue,
        R: Copy,
        F: FnOnce() -> R,
        V: FnOnce(R),
    >(
        &self,
        data_addr: Address,
        input: Option<T>,
        access_func: F,
        verify_func: V,
    ) -> R {
        #[cfg(feature = "extreme_assertions")]
        let _lock = sanity::SANITY_LOCK.lock().unwrap();

        #[cfg(debug_assertions)]
        {
            if CHECK_VALUE {
                self.assert_value_type::<T>(input);
            }
            #[cfg(feature = "extreme_assertions")]
            self.assert_metadata_mapped(data_addr);
        }

        let ret = access_func();

        if CHECK_VALUE {
            verify_func(ret);
        }

        ret
    }

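    /// Non-atomically load the metadata value for `data_addr`.
    ///
    /// # Safety
    /// The caller must ensure there are no concurrent writes to the same metadata byte.
    ///
    /// Illustrative use (see the tests below for the full pattern):
    ///
    /// ```ignore
    /// let v: u8 = unsafe { spec.load::<u8>(data_addr) };
    /// ```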
496 pub unsafe fn load<T: MetadataValue>(&self, data_addr: Address) -> T {
505 self.side_metadata_access::<true, T, _, _, _>(
506 data_addr,
507 None,
508 || {
509 let meta_addr = address_to_meta_address(self, data_addr);
510 let bits_num_log = self.log_num_of_bits;
511 if bits_num_log < 3 {
512 let lshift = meta_byte_lshift(self, data_addr);
513 let mask = meta_byte_mask(self) << lshift;
514 let byte_val = meta_addr.load::<u8>();
515
516 FromPrimitive::from_u8((byte_val & mask) >> lshift).unwrap()
517 } else {
518 meta_addr.load::<T>()
519 }
520 },
521 |_v| {
522 #[cfg(feature = "extreme_assertions")]
523 sanity::verify_load(self, data_addr, _v);
524 },
525 )
526 }
527
528 pub unsafe fn store<T: MetadataValue>(&self, data_addr: Address, metadata: T) {
537 self.side_metadata_access::<true, T, _, _, _>(
538 data_addr,
539 Some(metadata),
540 || {
541 let meta_addr = address_to_meta_address(self, data_addr);
542 let bits_num_log = self.log_num_of_bits;
543 if bits_num_log < 3 {
544 let lshift = meta_byte_lshift(self, data_addr);
545 let mask = meta_byte_mask(self) << lshift;
546 let old_val = meta_addr.load::<u8>();
547 let new_val = (old_val & !mask) | (metadata.to_u8().unwrap() << lshift);
548
549 meta_addr.store::<u8>(new_val);
550 } else {
551 meta_addr.store::<T>(metadata);
552 }
553 },
554 |_| {
555 #[cfg(feature = "extreme_assertions")]
556 sanity::verify_store(self, data_addr, metadata);
557 },
558 )
559 }
560
561 pub fn load_atomic<T: MetadataValue>(&self, data_addr: Address, order: Ordering) -> T {
564 self.side_metadata_access::<true, T, _, _, _>(
565 data_addr,
566 None,
567 || {
568 let meta_addr = address_to_meta_address(self, data_addr);
569 let bits_num_log = self.log_num_of_bits;
570 if bits_num_log < 3 {
571 let lshift = meta_byte_lshift(self, data_addr);
572 let mask = meta_byte_mask(self) << lshift;
573 let byte_val = unsafe { meta_addr.atomic_load::<AtomicU8>(order) };
574 FromPrimitive::from_u8((byte_val & mask) >> lshift).unwrap()
575 } else {
576 unsafe { T::load_atomic(meta_addr, order) }
577 }
578 },
579 |_v| {
580 #[cfg(feature = "extreme_assertions")]
581 sanity::verify_load(self, data_addr, _v);
582 },
583 )
584 }
585
586 pub fn store_atomic<T: MetadataValue>(&self, data_addr: Address, metadata: T, order: Ordering) {
589 self.side_metadata_access::<true, T, _, _, _>(
590 data_addr,
591 Some(metadata),
592 || {
593 let meta_addr = address_to_meta_address(self, data_addr);
594 let bits_num_log = self.log_num_of_bits;
595 if bits_num_log < 3 {
596 let lshift = meta_byte_lshift(self, data_addr);
597 let mask = meta_byte_mask(self) << lshift;
598 let metadata_u8 = metadata.to_u8().unwrap();
599 let _ = unsafe {
600 <u8 as MetadataValue>::fetch_update(meta_addr, order, order, |v: u8| {
601 Some((v & !mask) | (metadata_u8 << lshift))
602 })
603 };
604 } else {
605 unsafe {
606 T::store_atomic(meta_addr, metadata, order);
607 }
608 }
609 },
610 |_| {
611 #[cfg(feature = "extreme_assertions")]
612 sanity::verify_store(self, data_addr, metadata);
613 },
614 )
615 }
616
617 pub unsafe fn set_zero(&self, data_addr: Address) {
627 use num_traits::Zero;
628 match self.log_num_of_bits {
629 0..=3 => self.store(data_addr, u8::zero()),
630 4 => self.store(data_addr, u16::zero()),
631 5 => self.store(data_addr, u32::zero()),
632 6 => self.store(data_addr, u64::zero()),
633 _ => unreachable!(),
634 }
635 }
636
637 pub fn set_zero_atomic(&self, data_addr: Address, order: Ordering) {
640 use num_traits::Zero;
641 match self.log_num_of_bits {
642 0..=3 => self.store_atomic(data_addr, u8::zero(), order),
643 4 => self.store_atomic(data_addr, u16::zero(), order),
644 5 => self.store_atomic(data_addr, u32::zero(), order),
645 6 => self.store_atomic(data_addr, u64::zero(), order),
646 _ => unreachable!(),
647 }
648 }
649
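    /// Atomically set the whole metadata byte that holds the bits for `data_addr` to 0xff,
    /// as an optimization over a read-modify-write for sub-byte specs. The caller must be
    /// sure that setting every bit in the byte (i.e. the metadata of neighbouring regions
    /// sharing the byte) is the intended behaviour. Under `extreme_assertions`, this falls
    /// back to a per-region `store_atomic` of 1 so the sanity checker can track the store.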
650 pub unsafe fn set_raw_byte_atomic(&self, data_addr: Address, order: Ordering) {
661 debug_assert!(self.log_num_of_bits < 3);
662 cfg_if::cfg_if! {
663 if #[cfg(feature = "extreme_assertions")] {
664 self.store_atomic::<u8>(data_addr, 1, order)
666 } else {
667 self.side_metadata_access::<false, u8, _, _, _>(
668 data_addr,
669 Some(1u8),
670 || {
671 let meta_addr = address_to_meta_address(self, data_addr);
672 u8::store_atomic(meta_addr, 0xffu8, order);
673 },
674 |_| {}
675 )
676 }
677 }
678 }
679
680 pub unsafe fn load_raw_byte(&self, data_addr: Address) -> u8 {
688 debug_assert!(self.log_num_of_bits < 3);
689 self.side_metadata_access::<false, u8, _, _, _>(
690 data_addr,
691 None,
692 || {
693 let meta_addr = address_to_meta_address(self, data_addr);
694 meta_addr.load::<u8>()
695 },
696 |_| {},
697 )
698 }
699
700 pub unsafe fn load_raw_word(&self, data_addr: Address) -> usize {
708 use crate::util::constants::*;
709 debug_assert!(self.log_num_of_bits < (LOG_BITS_IN_BYTE + LOG_BYTES_IN_ADDRESS) as usize);
710 self.side_metadata_access::<false, usize, _, _, _>(
711 data_addr,
712 None,
713 || {
714 let meta_addr = address_to_meta_address(self, data_addr);
715 let aligned_meta_addr = meta_addr.align_down(BYTES_IN_ADDRESS);
716 aligned_meta_addr.load::<usize>()
717 },
718 |_| {},
719 )
720 }
721
722 pub fn compare_exchange_atomic<T: MetadataValue>(
727 &self,
728 data_addr: Address,
729 old_metadata: T,
730 new_metadata: T,
731 success_order: Ordering,
732 failure_order: Ordering,
733 ) -> std::result::Result<T, T> {
734 self.side_metadata_access::<true, T, _, _, _>(
735 data_addr,
736 Some(new_metadata),
737 || {
738 let meta_addr = address_to_meta_address(self, data_addr);
739 let bits_num_log = self.log_num_of_bits;
740 if bits_num_log < 3 {
741 let lshift = meta_byte_lshift(self, data_addr);
742 let mask = meta_byte_mask(self) << lshift;
743
744 let real_old_byte = unsafe { meta_addr.atomic_load::<AtomicU8>(success_order) };
745 let expected_old_byte =
746 (real_old_byte & !mask) | ((old_metadata.to_u8().unwrap()) << lshift);
747 let expected_new_byte =
748 (expected_old_byte & !mask) | ((new_metadata.to_u8().unwrap()) << lshift);
749
750 unsafe {
751 meta_addr.compare_exchange::<AtomicU8>(
752 expected_old_byte,
753 expected_new_byte,
754 success_order,
755 failure_order,
756 )
757 }
758 .map(|x| FromPrimitive::from_u8((x & mask) >> lshift).unwrap())
759 .map_err(|x| FromPrimitive::from_u8((x & mask) >> lshift).unwrap())
760 } else {
761 unsafe {
762 T::compare_exchange(
763 meta_addr,
764 old_metadata,
765 new_metadata,
766 success_order,
767 failure_order,
768 )
769 }
770 }
771 },
772 |_res| {
773 #[cfg(feature = "extreme_assertions")]
774 if _res.is_ok() {
775 sanity::verify_store(self, data_addr, new_metadata);
776 }
777 },
778 )
779 }
780
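    /// Helper for the fetch_* operations on sub-byte metadata: perform `fetch_update` on the
    /// byte that contains the bits for `data_addr`, applying `update` to just those bits, and
    /// return the old value of those bits (shifted down to the low-order position).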
781 fn fetch_ops_on_bits<F: Fn(u8) -> u8>(
784 &self,
785 data_addr: Address,
786 meta_addr: Address,
787 set_order: Ordering,
788 fetch_order: Ordering,
789 update: F,
790 ) -> u8 {
791 let lshift = meta_byte_lshift(self, data_addr);
792 let mask = meta_byte_mask(self) << lshift;
793
794 let old_raw_byte = unsafe {
795 <u8 as MetadataValue>::fetch_update(
796 meta_addr,
797 set_order,
798 fetch_order,
799 |raw_byte: u8| {
800 let old_val = (raw_byte & mask) >> lshift;
801 let new_val = update(old_val);
802 let new_raw_byte = (raw_byte & !mask) | ((new_val << lshift) & mask);
803 Some(new_raw_byte)
804 },
805 )
806 }
807 .unwrap();
808 (old_raw_byte & mask) >> lshift
809 }
810
811 pub fn fetch_add_atomic<T: MetadataValue>(
815 &self,
816 data_addr: Address,
817 val: T,
818 order: Ordering,
819 ) -> T {
820 self.side_metadata_access::<true, T, _, _, _>(
821 data_addr,
822 Some(val),
823 || {
824 let meta_addr = address_to_meta_address(self, data_addr);
825 let bits_num_log = self.log_num_of_bits;
826 if bits_num_log < 3 {
827 FromPrimitive::from_u8(self.fetch_ops_on_bits(
828 data_addr,
829 meta_addr,
830 order,
831 order,
832 |x: u8| x.wrapping_add(val.to_u8().unwrap()),
833 ))
834 .unwrap()
835 } else {
836 unsafe { T::fetch_add(meta_addr, val, order) }
837 }
838 },
839 |_old_val| {
840 #[cfg(feature = "extreme_assertions")]
841 sanity::verify_update::<T>(self, data_addr, _old_val, _old_val.wrapping_add(&val))
842 },
843 )
844 }
845
846 pub fn fetch_sub_atomic<T: MetadataValue>(
850 &self,
851 data_addr: Address,
852 val: T,
853 order: Ordering,
854 ) -> T {
855 self.side_metadata_access::<true, T, _, _, _>(
856 data_addr,
857 Some(val),
858 || {
859 let meta_addr = address_to_meta_address(self, data_addr);
860 if self.log_num_of_bits < 3 {
861 FromPrimitive::from_u8(self.fetch_ops_on_bits(
862 data_addr,
863 meta_addr,
864 order,
865 order,
866 |x: u8| x.wrapping_sub(val.to_u8().unwrap()),
867 ))
868 .unwrap()
869 } else {
870 unsafe { T::fetch_sub(meta_addr, val, order) }
871 }
872 },
873 |_old_val| {
874 #[cfg(feature = "extreme_assertions")]
875 sanity::verify_update::<T>(self, data_addr, _old_val, _old_val.wrapping_sub(&val))
876 },
877 )
878 }
879
880 pub fn fetch_and_atomic<T: MetadataValue>(
884 &self,
885 data_addr: Address,
886 val: T,
887 order: Ordering,
888 ) -> T {
889 self.side_metadata_access::<true, T, _, _, _>(
890 data_addr,
891 Some(val),
892 || {
893 let meta_addr = address_to_meta_address(self, data_addr);
894 if self.log_num_of_bits < 3 {
895 let lshift = meta_byte_lshift(self, data_addr);
896 let mask = meta_byte_mask(self) << lshift;
897 let rhs = (val.to_u8().unwrap() << lshift) | !mask;
899 let old_raw_byte =
900 unsafe { <u8 as MetadataValue>::fetch_and(meta_addr, rhs, order) };
901 let old_val = (old_raw_byte & mask) >> lshift;
902 FromPrimitive::from_u8(old_val).unwrap()
903 } else {
904 unsafe { T::fetch_and(meta_addr, val, order) }
905 }
906 },
907 |_old_val| {
908 #[cfg(feature = "extreme_assertions")]
909 sanity::verify_update::<T>(self, data_addr, _old_val, _old_val.bitand(val))
910 },
911 )
912 }
913
914 pub fn fetch_or_atomic<T: MetadataValue>(
918 &self,
919 data_addr: Address,
920 val: T,
921 order: Ordering,
922 ) -> T {
923 self.side_metadata_access::<true, T, _, _, _>(
924 data_addr,
925 Some(val),
926 || {
927 let meta_addr = address_to_meta_address(self, data_addr);
928 if self.log_num_of_bits < 3 {
929 let lshift = meta_byte_lshift(self, data_addr);
930 let mask = meta_byte_mask(self) << lshift;
931 let rhs = (val.to_u8().unwrap() << lshift) & mask;
933 let old_raw_byte =
934 unsafe { <u8 as MetadataValue>::fetch_or(meta_addr, rhs, order) };
935 let old_val = (old_raw_byte & mask) >> lshift;
936 FromPrimitive::from_u8(old_val).unwrap()
937 } else {
938 unsafe { T::fetch_or(meta_addr, val, order) }
939 }
940 },
941 |_old_val| {
942 #[cfg(feature = "extreme_assertions")]
943 sanity::verify_update::<T>(self, data_addr, _old_val, _old_val.bitor(val))
944 },
945 )
946 }
947
948 pub fn fetch_update_atomic<T: MetadataValue, F: FnMut(T) -> Option<T> + Copy>(
952 &self,
953 data_addr: Address,
954 set_order: Ordering,
955 fetch_order: Ordering,
956 mut f: F,
957 ) -> std::result::Result<T, T> {
958 self.side_metadata_access::<true, T, _, _, _>(
959 data_addr,
960 None,
961 move || -> std::result::Result<T, T> {
962 let meta_addr = address_to_meta_address(self, data_addr);
963 if self.log_num_of_bits < 3 {
964 let lshift = meta_byte_lshift(self, data_addr);
965 let mask = meta_byte_mask(self) << lshift;
966
967 unsafe {
968 <u8 as MetadataValue>::fetch_update(
969 meta_addr,
970 set_order,
971 fetch_order,
972 |raw_byte: u8| {
973 let old_val = (raw_byte & mask) >> lshift;
974 f(FromPrimitive::from_u8(old_val).unwrap()).map(|new_val| {
975 (raw_byte & !mask)
976 | ((new_val.to_u8().unwrap() << lshift) & mask)
977 })
978 },
979 )
980 }
981 .map(|x| FromPrimitive::from_u8((x & mask) >> lshift).unwrap())
982 .map_err(|x| FromPrimitive::from_u8((x & mask) >> lshift).unwrap())
983 } else {
984 unsafe { T::fetch_update(meta_addr, set_order, fetch_order, f) }
985 }
986 },
987 |_result| {
988 #[cfg(feature = "extreme_assertions")]
989 if let Ok(old_val) = _result {
990 sanity::verify_update::<T>(self, data_addr, old_val, f(old_val).unwrap())
991 }
992 },
993 )
994 }
995
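    /// Search backwards from `data_addr` for the closest region whose metadata is non-zero,
    /// looking at most `search_limit_bytes` bytes back (including `data_addr` itself). Returns
    /// the region-aligned data address if one is found, or `None` if no such region exists in
    /// the searched range or its metadata is not mapped.
    ///
    /// # Safety
    /// The search performs non-atomic loads; the caller must ensure the metadata being
    /// searched is not modified concurrently.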
    #[allow(clippy::let_and_return)]
    pub unsafe fn find_prev_non_zero_value<T: MetadataValue>(
        &self,
        data_addr: Address,
        search_limit_bytes: usize,
    ) -> Option<Address> {
        debug_assert!(search_limit_bytes > 0);

        if self.uses_contiguous_side_metadata() {
            let result = self.find_prev_non_zero_value_fast::<T>(data_addr, search_limit_bytes);
            #[cfg(debug_assertions)]
            {
                // Cross-check the optimized search against the naive implementation.
                let result2 =
                    self.find_prev_non_zero_value_simple::<T>(data_addr, search_limit_bytes);
                assert_eq!(
                    result, result2,
                    "find_prev_non_zero_value_fast returned a different result from the naive implementation."
                );
            }
            result
        } else {
            warn!("We are trying to search for non-zero bits in discontiguous side metadata. The performance is slow, as MMTk does not optimize for this case.");
            self.find_prev_non_zero_value_simple::<T>(data_addr, search_limit_bytes)
        }
    }

1036
1037 fn find_prev_non_zero_value_simple<T: MetadataValue>(
1038 &self,
1039 data_addr: Address,
1040 search_limit_bytes: usize,
1041 ) -> Option<Address> {
1042 let region_bytes = 1 << self.log_bytes_in_region;
1043 let start_addr = data_addr.align_down(region_bytes);
1045 let end_addr = data_addr.saturating_sub(search_limit_bytes) + 1usize;
1046
1047 let mut cursor = start_addr;
1048 while cursor >= end_addr {
1049 if !cursor.is_mapped() {
1051 return None;
1052 }
1053 if !unsafe { self.load::<T>(cursor).is_zero() } {
1055 return Some(cursor);
1056 }
1057 cursor -= region_bytes;
1058 }
1059 None
1060 }
1061
1062 #[allow(clippy::let_and_return)]
1063 fn find_prev_non_zero_value_fast<T: MetadataValue>(
1064 &self,
1065 data_addr: Address,
1066 search_limit_bytes: usize,
1067 ) -> Option<Address> {
1068 debug_assert!(self.uses_contiguous_side_metadata());
1069
1070 if !data_addr.is_mapped() {
1072 return None;
1073 }
1074 if !unsafe { self.load::<T>(data_addr).is_zero() } {
1076 return Some(data_addr.align_down(1 << self.log_bytes_in_region));
1077 }
1078
1079 let start_addr = data_addr.saturating_sub(search_limit_bytes) + 1usize;
1081 let end_addr = data_addr;
1082
1083 let start_meta_addr = address_to_contiguous_meta_address(self, start_addr);
1087 let start_meta_shift = meta_byte_lshift(self, start_addr);
1088 let end_meta_addr = address_to_contiguous_meta_address(self, end_addr);
1089 let end_meta_shift = meta_byte_lshift(self, end_addr);
1090
1091 let mut res = None;
1092
1093 let mut visitor = |range: BitByteRange| {
1094 match range {
1095 BitByteRange::Bytes { start, end } => {
1096 match helpers::find_last_non_zero_bit_in_metadata_bytes(start, end) {
1097 helpers::FindMetaBitResult::Found { addr, bit } => {
1098 let (addr, bit) = align_metadata_address(self, addr, bit);
1099 res = Some(contiguous_meta_address_to_address(self, addr, bit));
1100 true
1102 }
1103 helpers::FindMetaBitResult::UnmappedMetadata => true,
1105 helpers::FindMetaBitResult::NotFound => false,
1107 }
1108 }
1109 BitByteRange::BitsInByte {
1110 addr,
1111 bit_start,
1112 bit_end,
1113 } => {
1114 match helpers::find_last_non_zero_bit_in_metadata_bits(addr, bit_start, bit_end)
1115 {
1116 helpers::FindMetaBitResult::Found { addr, bit } => {
1117 let (addr, bit) = align_metadata_address(self, addr, bit);
1118 res = Some(contiguous_meta_address_to_address(self, addr, bit));
1119 true
1121 }
1122 helpers::FindMetaBitResult::UnmappedMetadata => true,
1124 helpers::FindMetaBitResult::NotFound => false,
1126 }
1127 }
1128 }
1129 };
1130
1131 ranges::break_bit_range(
1132 start_meta_addr,
1133 start_meta_shift,
1134 end_meta_addr,
1135 end_meta_shift,
1136 false,
1137 &mut visitor,
1138 );
1139
1140 res.map(|addr| addr.align_down(1 << self.log_bytes_in_region))
1145 .filter(|addr| *addr >= start_addr && *addr < end_addr)
1146 }
1147
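    /// Visit every region in `[data_start_addr, data_end_addr)` whose metadata value is
    /// non-zero, calling `visit_data` with the region's data address. A fast bit-scanning
    /// path is used for contiguous 1-bit specs; other specs fall back to loading each
    /// region's value in turn.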
1148 pub fn scan_non_zero_values<T: MetadataValue>(
1158 &self,
1159 data_start_addr: Address,
1160 data_end_addr: Address,
1161 visit_data: &mut impl FnMut(Address),
1162 ) {
1163 if self.uses_contiguous_side_metadata() && self.log_num_of_bits == 0 {
1164 self.scan_non_zero_values_fast(data_start_addr, data_end_addr, visit_data);
1169 } else {
            warn!(
                "We are trying to search for non-zero bits in discontiguous side metadata, \
                 or the metadata has more than one bit per region. \
                 The performance is slow, as MMTk does not optimize for this case."
            );
1179 self.scan_non_zero_values_simple::<T>(data_start_addr, data_end_addr, visit_data);
1180 }
1181 }
1182
1183 fn scan_non_zero_values_simple<T: MetadataValue>(
1184 &self,
1185 data_start_addr: Address,
1186 data_end_addr: Address,
1187 visit_data: &mut impl FnMut(Address),
1188 ) {
1189 let region_bytes = 1usize << self.log_bytes_in_region;
1190
1191 let mut cursor = data_start_addr;
1192 while cursor < data_end_addr {
1193 debug_assert!(cursor.is_mapped());
1194
1195 if !unsafe { self.load::<T>(cursor).is_zero() } {
1197 visit_data(cursor);
1198 }
1199 cursor += region_bytes;
1200 }
1201 }
1202
1203 fn scan_non_zero_values_fast(
1204 &self,
1205 data_start_addr: Address,
1206 data_end_addr: Address,
1207 visit_data: &mut impl FnMut(Address),
1208 ) {
1209 debug_assert!(self.uses_contiguous_side_metadata());
1210 debug_assert_eq!(self.log_num_of_bits, 0);
1211
1212 let start_meta_addr = address_to_contiguous_meta_address(self, data_start_addr);
1214 let start_meta_shift = meta_byte_lshift(self, data_start_addr);
1215 let end_meta_addr = address_to_contiguous_meta_address(self, data_end_addr);
1216 let end_meta_shift = meta_byte_lshift(self, data_end_addr);
1217
1218 let mut visitor = |range| {
1219 match range {
1220 BitByteRange::Bytes { start, end } => {
1221 helpers::scan_non_zero_bits_in_metadata_bytes(start, end, &mut |addr, bit| {
1222 visit_data(helpers::contiguous_meta_address_to_address(self, addr, bit));
1223 });
1224 }
1225 BitByteRange::BitsInByte {
1226 addr,
1227 bit_start,
1228 bit_end,
1229 } => helpers::scan_non_zero_bits_in_metadata_bits(
1230 addr,
1231 bit_start,
1232 bit_end,
1233 &mut |addr, bit| {
1234 visit_data(helpers::contiguous_meta_address_to_address(self, addr, bit));
1235 },
1236 ),
1237 }
1238 false
1239 };
1240
1241 ranges::break_bit_range(
1242 start_meta_addr,
1243 start_meta_shift,
1244 end_meta_addr,
1245 end_meta_shift,
1246 true,
1247 &mut visitor,
1248 );
1249 }
1250}
1251
1252impl fmt::Debug for SideMetadataSpec {
1253 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1254 f.write_fmt(format_args!(
1255 "SideMetadataSpec {} {{ \
1256 **is_global: {:?} \
1257 **offset: {} \
1258 **log_num_of_bits: 0x{:x} \
1259 **log_bytes_in_region: 0x{:x} \
1260 }}",
1261 self.name,
1262 self.is_global,
1263 unsafe {
1264 if self.is_absolute_offset() {
1265 format!("0x{:x}", self.offset.addr)
1266 } else {
1267 format!("0x{:x}", self.offset.rel_offset)
1268 }
1269 },
1270 self.log_num_of_bits,
1271 self.log_bytes_in_region
1272 ))
1273 }
1274}
1275
/// The offset of a side metadata spec: either an absolute address (for contiguous side
/// metadata) or a relative offset within each chunk's metadata (for chunked side metadata
/// on 32-bit targets).
#[derive(Clone, Copy)]
pub union SideMetadataOffset {
    addr: Address,
    rel_offset: usize,
}

impl SideMetadataOffset {
    /// Create an offset from an absolute address.
    pub const fn addr(addr: Address) -> Self {
        SideMetadataOffset { addr }
    }

    /// Create an offset that is relative to the per-chunk metadata.
    pub const fn rel(rel_offset: usize) -> Self {
        SideMetadataOffset { rel_offset }
    }

    /// Create an offset that is laid out immediately after the given spec,
    /// aligned up to a word boundary.
    pub const fn layout_after(spec: &SideMetadataSpec) -> SideMetadataOffset {
        let upper_bound_offset = spec.upper_bound_offset();
        if spec.is_absolute_offset() {
            let addr = unsafe { upper_bound_offset.addr };
            let aligned_addr = addr.align_up(BYTES_IN_WORD);
            SideMetadataOffset::addr(aligned_addr)
        } else {
            let rel_offset = unsafe { upper_bound_offset.rel_offset };
            let aligned_rel_offset = raw_align_up(rel_offset, BYTES_IN_WORD);
            SideMetadataOffset::rel(aligned_rel_offset)
        }
    }
}
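
// Illustrative sketch only (not from the original source): `layout_after` is intended for
// chaining specs so that each one starts (word-aligned) where the previous one ends, e.g.:
//
//     const SPEC_1: SideMetadataSpec = SideMetadataSpec {
//         name: "Spec1",
//         is_global: true,
//         offset: SideMetadataOffset::addr(GLOBAL_SIDE_METADATA_BASE_ADDRESS),
//         log_num_of_bits: 0,
//         log_bytes_in_region: 3,
//     };
//     const SPEC_2: SideMetadataSpec = SideMetadataSpec {
//         name: "Spec2",
//         is_global: true,
//         offset: SideMetadataOffset::layout_after(&SPEC_1),
//         log_num_of_bits: 0,
//         log_bytes_in_region: 3,
//     };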
1318
1319impl PartialEq for SideMetadataOffset {
1322 fn eq(&self, other: &Self) -> bool {
1323 unsafe { self.rel_offset == other.rel_offset }
1324 }
1325}
1326impl Eq for SideMetadataOffset {}
1327
1328impl std::hash::Hash for SideMetadataOffset {
1329 fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
1330 unsafe { self.rel_offset }.hash(state);
1331 }
1332}
1333
pub(crate) struct SideMetadataContext {
    /// Global side metadata specs.
    pub global: Vec<SideMetadataSpec>,
    /// Policy-specific (local) side metadata specs.
    pub local: Vec<SideMetadataSpec>,
}

impl SideMetadataContext {
    /// Return the global specs used by MMTk core (the VO bit when enabled, the SFT map's
    /// side metadata if it is global, and the chunk allocation table), followed by the
    /// given extra specs.
    #[allow(clippy::vec_init_then_push)]
    pub fn new_global_specs(specs: &[SideMetadataSpec]) -> Vec<SideMetadataSpec> {
        let mut ret = vec![];

        #[cfg(feature = "vo_bit")]
        ret.push(VO_BIT_SIDE_METADATA_SPEC);

        if let Some(spec) = crate::mmtk::SFT_MAP.get_side_metadata() {
            if spec.is_global {
                ret.push(*spec);
            }
        }

        ret.push(crate::util::heap::chunk_map::ChunkMap::ALLOC_TABLE);

        ret.extend_from_slice(specs);
        ret
    }
1365
1366 pub fn get_local_specs(&self) -> &[SideMetadataSpec] {
1367 &self.local
1368 }
1369
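    /// Estimate how many pages of side metadata are needed for `data_pages` pages of data,
    /// summing the (rounded-up) contribution of every global and local spec in this context.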
1370 pub fn calculate_reserved_pages(&self, data_pages: usize) -> usize {
1377 let mut total = 0;
1378 for spec in self.global.iter() {
1379 total += data_to_meta_size_round_up(spec, data_pages);
1383 }
1384 for spec in self.local.iter() {
1385 total += data_to_meta_size_round_up(spec, data_pages);
1386 }
1387 total
1388 }
1389
1390 pub fn try_map_metadata_space(
1396 &self,
1397 start: Address,
1398 size: usize,
1399 space_name: &str,
1400 ) -> Result<()> {
1401 debug!(
1402 "try_map_metadata_space({}, 0x{:x}, {}, {})",
1403 start,
1404 size,
1405 self.global.len(),
1406 self.local.len()
1407 );
1408 debug_assert!(start.is_aligned_to(BYTES_IN_PAGE));
1410 debug_assert!(size % BYTES_IN_PAGE == 0);
1411 self.map_metadata_internal(start, size, false, space_name)
1412 }
1413
1414 pub fn try_map_metadata_address_range(
1419 &self,
1420 start: Address,
1421 size: usize,
1422 name: &str,
1423 ) -> Result<()> {
1424 debug!(
1425 "try_map_metadata_address_range({}, 0x{:x}, {}, {})",
1426 start,
1427 size,
1428 self.global.len(),
1429 self.local.len()
1430 );
1431 debug_assert!(start.is_aligned_to(BYTES_IN_CHUNK));
1433 debug_assert!(size % BYTES_IN_CHUNK == 0);
1434 self.map_metadata_internal(start, size, true, name)
1435 }
1436
1437 fn map_metadata_internal(
1445 &self,
1446 start: Address,
1447 size: usize,
1448 no_reserve: bool,
1449 space_name: &str,
1450 ) -> Result<()> {
1451 for spec in self.global.iter() {
1452 let anno = MmapAnnotation::SideMeta {
1453 space: space_name,
1454 meta: spec.name,
1455 };
1456 match try_mmap_contiguous_metadata_space(start, size, spec, no_reserve, &anno) {
1457 Ok(_) => {}
1458 Err(e) => return Result::Err(e),
1459 }
1460 }
1461
1462 #[cfg(target_pointer_width = "32")]
1463 let mut lsize: usize = 0;
1464
1465 for spec in self.local.iter() {
1466 #[cfg(target_pointer_width = "64")]
1478 {
1479 let anno = MmapAnnotation::SideMeta {
1480 space: space_name,
1481 meta: spec.name,
1482 };
1483 match try_mmap_contiguous_metadata_space(start, size, spec, no_reserve, &anno) {
1484 Ok(_) => {}
1485 Err(e) => return Result::Err(e),
1486 }
1487 }
1488 #[cfg(target_pointer_width = "32")]
1489 {
1490 lsize += metadata_bytes_per_chunk(spec.log_bytes_in_region, spec.log_num_of_bits);
1491 }
1492 }
1493
1494 #[cfg(target_pointer_width = "32")]
1495 if lsize > 0 {
1496 let max = BYTES_IN_CHUNK >> super::constants::LOG_LOCAL_SIDE_METADATA_WORST_CASE_RATIO;
1497 debug_assert!(
1498 lsize <= max,
1499 "local side metadata per chunk (0x{:x}) must be less than (0x{:x})",
1500 lsize,
1501 max
1502 );
1503 let anno = MmapAnnotation::SideMeta {
1506 space: space_name,
1507 meta: "all",
1508 };
1509 match try_map_per_chunk_metadata_space(start, size, lsize, no_reserve, &anno) {
1510 Ok(_) => {}
1511 Err(e) => return Result::Err(e),
1512 }
1513 }
1514
1515 Ok(())
1516 }
1517
1518 #[cfg(test)]
1526 pub fn ensure_unmap_metadata_space(&self, start: Address, size: usize) {
1527 trace!("ensure_unmap_metadata_space({}, 0x{:x})", start, size);
1528 debug_assert!(start.is_aligned_to(BYTES_IN_PAGE));
1529 debug_assert!(size % BYTES_IN_PAGE == 0);
1530
1531 for spec in self.global.iter() {
1532 ensure_munmap_contiguous_metadata_space(start, size, spec);
1533 }
1534
1535 for spec in self.local.iter() {
1536 #[cfg(target_pointer_width = "64")]
1537 {
1538 ensure_munmap_contiguous_metadata_space(start, size, spec);
1539 }
1540 #[cfg(target_pointer_width = "32")]
1541 {
1542 ensure_munmap_chunked_metadata_space(start, size, spec);
1543 }
1544 }
1545 }
1546}
1547
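/// A byte-granularity, read-only view of a side metadata table over a fixed heap range of
/// `ENTRIES` regions. It requires the spec to use one byte per region, and is backed
/// directly by the mapped metadata memory.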
1548pub struct MetadataByteArrayRef<const ENTRIES: usize> {
1550 #[cfg(feature = "extreme_assertions")]
1551 heap_range_start: Address,
1552 #[cfg(feature = "extreme_assertions")]
1553 spec: SideMetadataSpec,
1554 data: &'static [u8; ENTRIES],
1555}
1556
1557impl<const ENTRIES: usize> MetadataByteArrayRef<ENTRIES> {
1558 pub fn new(metadata_spec: &SideMetadataSpec, start: Address, bytes: usize) -> Self {
1567 debug_assert_eq!(
1568 metadata_spec.log_num_of_bits, LOG_BITS_IN_BYTE as usize,
1569 "Each heap entry should map to a byte in side-metadata"
1570 );
1571 debug_assert_eq!(
1572 bytes >> metadata_spec.log_bytes_in_region,
1573 ENTRIES,
1574 "Heap range size and MetadataByteArray size does not match"
1575 );
1576 Self {
1577 #[cfg(feature = "extreme_assertions")]
1578 heap_range_start: start,
1579 #[cfg(feature = "extreme_assertions")]
1580 spec: *metadata_spec,
1581 data: unsafe { &*address_to_meta_address(metadata_spec, start).to_ptr() },
1584 }
1585 }
1586
1587 #[allow(clippy::len_without_is_empty)]
1589 pub const fn len(&self) -> usize {
1590 ENTRIES
1591 }
1592
1593 #[allow(clippy::let_and_return)]
1595 pub fn get(&self, index: usize) -> u8 {
1596 #[cfg(feature = "extreme_assertions")]
1597 let _lock = sanity::SANITY_LOCK.lock().unwrap();
1598 let value = self.data[index];
1599 #[cfg(feature = "extreme_assertions")]
1600 {
1601 let data_addr = self.heap_range_start + (index << self.spec.log_bytes_in_region);
1602 sanity::verify_load::<u8>(&self.spec, data_addr, value);
1603 }
1604 value
1605 }
1606}
1607
1608#[cfg(test)]
1609mod tests {
1610 use super::*;
1611 use crate::mmap_anno_test;
1612 use crate::util::metadata::side_metadata::SideMetadataContext;
1613
1614 pub const ZERO_OFFSET: SideMetadataOffset = SideMetadataOffset { rel_offset: 0 };
1616
1617 #[test]
1618 fn calculate_reserved_pages_one_spec() {
1619 let spec = SideMetadataSpec {
1621 name: "test_spec",
1622 is_global: true,
1623 offset: ZERO_OFFSET,
1624 log_num_of_bits: 0,
1625 log_bytes_in_region: 3,
1626 };
1627 let side_metadata = SideMetadataContext {
1628 global: vec![spec],
1629 local: vec![],
1630 };
1631 assert_eq!(side_metadata.calculate_reserved_pages(0), 0);
1632 assert_eq!(side_metadata.calculate_reserved_pages(63), 1);
1633 assert_eq!(side_metadata.calculate_reserved_pages(64), 1);
1634 assert_eq!(side_metadata.calculate_reserved_pages(65), 2);
1635 assert_eq!(side_metadata.calculate_reserved_pages(1024), 16);
1636 }
1637
1638 #[test]
1639 fn calculate_reserved_pages_multi_specs() {
1640 let gspec = SideMetadataSpec {
1642 name: "gspec",
1643 is_global: true,
1644 offset: ZERO_OFFSET,
1645 log_num_of_bits: 0,
1646 log_bytes_in_region: 3,
1647 };
1648 let lspec = SideMetadataSpec {
1650 name: "lspec",
1651 is_global: false,
1652 offset: ZERO_OFFSET,
1653 log_num_of_bits: 1,
1654 log_bytes_in_region: 12,
1655 };
1656 let side_metadata = SideMetadataContext {
1657 global: vec![gspec],
1658 local: vec![lspec],
1659 };
1660 assert_eq!(side_metadata.calculate_reserved_pages(1024), 16 + 1);
1661 }
1662
1663 use crate::util::heap::layout::vm_layout;
1664 use crate::util::test_util::{serial_test, with_cleanup};
1665 use memory::MmapStrategy;
1666 use paste::paste;
1667
1668 const TEST_LOG_BYTES_IN_REGION: usize = 12;
1669
1670 fn test_side_metadata(
1671 log_bits: usize,
1672 f: impl Fn(&SideMetadataSpec, Address, Address) + std::panic::RefUnwindSafe,
1673 ) {
1674 serial_test(|| {
1675 let spec = SideMetadataSpec {
1676 name: "Test Spec $tname",
1677 is_global: true,
1678 offset: SideMetadataOffset::addr(GLOBAL_SIDE_METADATA_BASE_ADDRESS),
1679 log_num_of_bits: log_bits,
                log_bytes_in_region: TEST_LOG_BYTES_IN_REGION,
            };
1682 let context = SideMetadataContext {
1683 global: vec![spec],
1684 local: vec![],
1685 };
1686 let mut sanity = SideMetadataSanity::new();
1687 sanity.verify_metadata_context("TestPolicy", &context);
1688
1689 let data_addr = vm_layout::vm_layout().heap_start;
1690 crate::MMAPPER
1692 .ensure_mapped(data_addr, 1, MmapStrategy::TEST, mmap_anno_test!())
1693 .unwrap();
1694 let meta_addr = address_to_meta_address(&spec, data_addr);
1695 with_cleanup(
1696 || {
1697 let mmap_result =
1698 context.try_map_metadata_space(data_addr, BYTES_IN_PAGE, "test_space");
1699 assert!(mmap_result.is_ok());
1700
1701 f(&spec, data_addr, meta_addr);
1702 },
1703 || {
1704 assert!(log_bits <= 6);
1706 let meta_ptr: *mut u64 = meta_addr.to_mut_ptr();
1707 unsafe { *meta_ptr = 0 };
1708
1709 sanity::reset();
1710 },
1711 )
1712 })
1713 }
1714
1715 fn max_value(log_bits: usize) -> u64 {
1716 (0..(1 << log_bits)).fold(0, |accum, x| accum + (1 << x))
1717 }
1718 #[test]
1719 fn test_max_value() {
1720 assert_eq!(max_value(0), 1);
1721 assert_eq!(max_value(1), 0b11);
1722 assert_eq!(max_value(2), 0b1111);
1723 assert_eq!(max_value(3), 255);
1724 assert_eq!(max_value(4), 65535);
1725 }
1726
1727 macro_rules! test_side_metadata_access {
1728 ($tname: ident, $type: ty, $log_bits: expr) => {
1729 paste!{
1730 #[test]
1731 fn [<$tname _load>]() {
1732 test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
1733 let meta_ptr: *mut $type = meta_addr.to_mut_ptr();
1734
1735 assert_eq!(unsafe { spec.load::<$type>(data_addr) }, 0);
1737 assert_eq!(spec.load_atomic::<$type>(data_addr, Ordering::SeqCst), 0);
1738
1739 let max_value: $type = max_value($log_bits) as _;
1741 unsafe { spec.store::<$type>(data_addr, max_value); }
1742 assert_eq!(unsafe { spec.load::<$type>(data_addr) }, max_value);
1743 assert_eq!(spec.load_atomic::<$type>(data_addr, Ordering::SeqCst), max_value);
1744 assert_eq!(unsafe { *meta_ptr }, max_value);
1745 });
1746 }
1747
1748 #[test]
1749 fn [<$tname _store>]() {
1750 test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
1751 let meta_ptr: *mut $type = meta_addr.to_mut_ptr();
1752 let max_value: $type = max_value($log_bits) as _;
1753
1754 unsafe { *meta_ptr = <$type>::MAX; }
1756 unsafe { spec.store::<$type>(data_addr, 0); }
1758 assert_eq!(unsafe { spec.load::<$type>(data_addr) }, 0);
1759 assert_eq!(unsafe { *meta_ptr }, <$type>::MAX & (!max_value));
1761 });
1762 }
1763
1764 #[test]
1765 fn [<$tname _atomic_store>]() {
1766 test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
1767 let meta_ptr: *mut $type = meta_addr.to_mut_ptr();
1768 let max_value: $type = max_value($log_bits) as _;
1769
1770 unsafe { *meta_ptr = <$type>::MAX; }
1772 spec.store_atomic::<$type>(data_addr, 0, Ordering::SeqCst);
1774 assert_eq!(unsafe { spec.load::<$type>(data_addr) }, 0);
1775 assert_eq!(unsafe { *meta_ptr }, <$type>::MAX & (!max_value));
1777 });
1778 }
1779
1780 #[test]
1781 fn [<$tname _compare_exchange_success>]() {
1782 test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
1783 let meta_ptr: *mut $type = meta_addr.to_mut_ptr();
1784 let max_value: $type = max_value($log_bits) as _;
1785 unsafe { *meta_ptr = <$type>::MAX; }
1787 spec.store_atomic::<$type>(data_addr, 1, Ordering::SeqCst);
1789
1790 let old_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
1791 assert_eq!(old_val, 1);
1792
1793 let new_val = 0;
1794 let res = spec.compare_exchange_atomic::<$type>(data_addr, old_val, new_val, Ordering::SeqCst, Ordering::SeqCst);
1795 assert!(res.is_ok());
1796 assert_eq!(res.unwrap(), old_val, "old vals do not match");
1797
1798 let after_update = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
1799 assert_eq!(after_update, new_val);
1800 assert_eq!(unsafe { *meta_ptr }, <$type>::MAX & (!max_value));
1802 });
1803 }
1804
1805 #[test]
1806 fn [<$tname _compare_exchange_fail>]() {
1807 test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
1808 let meta_ptr: *mut $type = meta_addr.to_mut_ptr();
1809 unsafe { *meta_ptr = <$type>::MAX; }
1811 spec.store_atomic::<$type>(data_addr, 1, Ordering::SeqCst);
1813
1814 let old_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
1815 assert_eq!(old_val, 1);
1816
1817 spec.store_atomic::<$type>(data_addr, 0, Ordering::SeqCst);
1819 let bits_before_cas = unsafe { *meta_ptr };
1820
1821 let new_val = 0;
1822 let res = spec.compare_exchange_atomic::<$type>(data_addr, old_val, new_val, Ordering::SeqCst, Ordering::SeqCst);
1823 assert!(res.is_err());
1824 assert_eq!(res.err().unwrap(), 0);
1825 let bits_after_cas = unsafe { *meta_ptr };
1826 assert_eq!(bits_before_cas, bits_after_cas);
1827 });
1828 }
1829
1830 #[test]
1831 fn [<$tname _fetch_add_1>]() {
1832 test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
1833 let meta_ptr: *mut $type = meta_addr.to_mut_ptr();
1834 unsafe { *meta_ptr = <$type>::MAX; }
1836 spec.store_atomic::<$type>(data_addr, 0, Ordering::SeqCst);
1838
1839 let old_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
1840
1841 let old_val_from_fetch = spec.fetch_add_atomic::<$type>(data_addr, 1, Ordering::SeqCst);
1842 assert_eq!(old_val_from_fetch, old_val);
1843
1844 let new_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
1845 assert_eq!(new_val, 1);
1846 });
1847 }
1848
1849 #[test]
1850 fn [<$tname _fetch_add_max>]() {
1851 test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
1852 let meta_ptr: *mut $type = meta_addr.to_mut_ptr();
1853 let max_value: $type = max_value($log_bits) as _;
1854 unsafe { *meta_ptr = <$type>::MAX; }
1856 spec.store_atomic::<$type>(data_addr, 0, Ordering::SeqCst);
1858
1859 let old_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
1860
1861 let old_val_from_fetch = spec.fetch_add_atomic::<$type>(data_addr, max_value, Ordering::SeqCst);
1862 assert_eq!(old_val_from_fetch, old_val);
1863
1864 let new_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
1865 assert_eq!(new_val, max_value);
1866 });
1867 }
1868
1869 #[test]
1870 fn [<$tname _fetch_add_overflow>]() {
1871 test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
1872 let meta_ptr: *mut $type = meta_addr.to_mut_ptr();
1873 let max_value: $type = max_value($log_bits) as _;
1874 unsafe { *meta_ptr = <$type>::MAX; }
1876 spec.store_atomic::<$type>(data_addr, max_value, Ordering::SeqCst);
1878
1879 let old_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
1880
1881 let old_val_from_fetch = spec.fetch_add_atomic::<$type>(data_addr, 1, Ordering::SeqCst);
1883 assert_eq!(old_val_from_fetch, old_val);
1884
1885 let new_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
1886 assert_eq!(new_val, 0);
1887 });
1888 }
1889
1890 #[test]
1891 fn [<$tname _fetch_sub_1>]() {
1892 test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
1893 let meta_ptr: *mut $type = meta_addr.to_mut_ptr();
1894 unsafe { *meta_ptr = <$type>::MAX; }
1896 spec.store_atomic::<$type>(data_addr, 1, Ordering::SeqCst);
1898
1899 let old_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
1900
1901 let old_val_from_fetch = spec.fetch_sub_atomic::<$type>(data_addr, 1, Ordering::SeqCst);
1902 assert_eq!(old_val_from_fetch, old_val);
1903
1904 let new_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
1905 assert_eq!(new_val, 0);
1906 });
1907 }
1908
1909 #[test]
1910 fn [<$tname _fetch_sub_max>]() {
1911 test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
1912 let meta_ptr: *mut $type = meta_addr.to_mut_ptr();
1913 let max_value: $type = max_value($log_bits) as _;
1914 unsafe { *meta_ptr = <$type>::MAX; }
1916 spec.store_atomic::<$type>(data_addr, max_value, Ordering::SeqCst);
1918
1919 let old_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
1920
1921 let old_val_from_fetch = spec.fetch_sub_atomic::<$type>(data_addr, max_value, Ordering::SeqCst);
1922 assert_eq!(old_val_from_fetch, old_val);
1923
1924 let new_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
1925 assert_eq!(new_val, 0);
1926 });
1927 }
1928
1929 #[test]
1930 fn [<$tname _fetch_sub_overflow>]() {
1931 test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
1932 let meta_ptr: *mut $type = meta_addr.to_mut_ptr();
1933 let max_value: $type = max_value($log_bits) as _;
1934 unsafe { *meta_ptr = <$type>::MAX; }
1936 spec.store_atomic::<$type>(data_addr, 0, Ordering::SeqCst);
1938
1939 let old_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
1940
1941 let old_val_from_fetch = spec.fetch_sub_atomic::<$type>(data_addr, 1, Ordering::SeqCst);
1943 assert_eq!(old_val_from_fetch, old_val);
1944
1945 let new_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
1946 assert_eq!(new_val, max_value);
1947 });
1948 }
1949
1950 #[test]
1951 fn [<$tname _fetch_and>]() {
1952 test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
1953 let meta_ptr: *mut $type = meta_addr.to_mut_ptr();
1954 let max_value: $type = max_value($log_bits) as _;
1955 unsafe { *meta_ptr = <$type>::MAX; }
1957 spec.store_atomic::<$type>(data_addr, max_value, Ordering::SeqCst);
1959
1960 let old_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
1962 let old_val_from_fetch = spec.fetch_and_atomic::<$type>(data_addr, max_value, Ordering::SeqCst);
1963 assert_eq!(old_val_from_fetch, old_val, "old values do not match");
1964 assert_eq!(spec.load_atomic::<$type>(data_addr, Ordering::SeqCst), max_value, "load values do not match");
1965 assert_eq!(unsafe { *meta_ptr }, <$type>::MAX, "raw values do not match");
1966
1967 let last_bit_zero = max_value - 1;
1969 let old_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
1970 let old_val_from_fetch = spec.fetch_and_atomic::<$type>(data_addr, last_bit_zero, Ordering::SeqCst);
1971 assert_eq!(old_val_from_fetch, old_val);
1972 assert_eq!(spec.load_atomic::<$type>(data_addr, Ordering::SeqCst), last_bit_zero);
1973 assert_eq!(unsafe { *meta_ptr }, <$type>::MAX - 1);
1974 });
1975 }
1976
1977 #[test]
1978 fn [<$tname _fetch_or>]() {
1979 test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
1980 let meta_ptr: *mut $type = meta_addr.to_mut_ptr();
1981 let max_value: $type = max_value($log_bits) as _;
1982 unsafe { *meta_ptr = 0; }
1984 spec.store_atomic::<$type>(data_addr, 0, Ordering::SeqCst);
1986
1987 let old_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
1989 let old_val_from_fetch = spec.fetch_or_atomic::<$type>(data_addr, 0, Ordering::SeqCst);
1990 assert_eq!(old_val_from_fetch, old_val);
1991 assert_eq!(spec.load_atomic::<$type>(data_addr, Ordering::SeqCst), 0);
1992 assert_eq!(unsafe { *meta_ptr }, 0);
1993
1994 let old_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
1996 let old_val_from_fetch = spec.fetch_or_atomic::<$type>(data_addr, max_value, Ordering::SeqCst);
1997 assert_eq!(old_val_from_fetch, old_val);
1998 assert_eq!(spec.load_atomic::<$type>(data_addr, Ordering::SeqCst), max_value);
1999 assert_eq!(unsafe { *meta_ptr }, max_value);
2000 });
2001 }
2002
2003 #[test]
2004 fn [<$tname _fetch_update_success>]() {
2005 test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
2006 let meta_ptr: *mut $type = meta_addr.to_mut_ptr();
2007 let max_value: $type = max_value($log_bits) as _;
2008 unsafe { *meta_ptr = <$type>::MAX; }
2010 spec.store_atomic::<$type>(data_addr, max_value, Ordering::SeqCst);
2012
2013 let old_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
2015 let fetch_res = spec.fetch_update_atomic::<$type, _>(data_addr, Ordering::SeqCst, Ordering::SeqCst, |_x: $type| Some(0));
2016 assert!(fetch_res.is_ok());
2017 assert_eq!(fetch_res.unwrap(), old_val);
2018 assert_eq!(spec.load_atomic::<$type>(data_addr, Ordering::SeqCst), 0);
2019 assert_eq!(unsafe { *meta_ptr }, <$type>::MAX & (!max_value));
2021 });
2022 }
2023
2024 #[test]
2025 fn [<$tname _fetch_update_fail>]() {
2026 test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
2027 let meta_ptr: *mut $type = meta_addr.to_mut_ptr();
2028 let max_value: $type = max_value($log_bits) as _;
2029 unsafe { *meta_ptr = <$type>::MAX; }
2031 spec.store_atomic::<$type>(data_addr, max_value, Ordering::SeqCst);
2033
2034 let old_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
2036 let fetch_res = spec.fetch_update_atomic::<$type, _>(data_addr, Ordering::SeqCst, Ordering::SeqCst, |_x: $type| None);
2037 assert!(fetch_res.is_err());
2038 assert_eq!(fetch_res.err().unwrap(), old_val);
2039 assert_eq!(spec.load_atomic::<$type>(data_addr, Ordering::SeqCst), max_value);
2040 assert_eq!(unsafe { *meta_ptr }, <$type>::MAX);
2042 });
2043 }
2044
2045 #[test]
2046 fn [<$tname _find_prev_non_zero_value_easy>]() {
2047 test_side_metadata($log_bits, |spec, data_addr, _meta_addr| {
2048 let max_value: $type = max_value($log_bits) as _;
2049 spec.store_atomic::<$type>(data_addr, max_value, Ordering::SeqCst);
2051
2052 let res_addr = unsafe { spec.find_prev_non_zero_value::<$type>(data_addr, 8) };
2055 assert!(res_addr.is_some());
2056 assert_eq!(res_addr.unwrap(), data_addr);
2057 });
2058 }
2059
2060 #[test]
2061 fn [<$tname _find_prev_non_zero_value_arbitrary_bytes>]() {
2062 test_side_metadata($log_bits, |spec, data_addr, _meta_addr| {
2063 let max_value: $type = max_value($log_bits) as _;
2064 spec.store_atomic::<$type>(data_addr, max_value, Ordering::SeqCst);
2066
2067 let test_region = (1 << TEST_LOG_BYTES_IN_REGION);
2069 for len in 1..(test_region*4) {
2070 let start_addr = data_addr + len;
2071 let res_addr = unsafe { spec.find_prev_non_zero_value::<$type>(start_addr, len + 1) };
2073 assert!(res_addr.is_some());
2074 assert_eq!(res_addr.unwrap(), data_addr);
2075 }
2076 });
2077 }
2078
2079 #[test]
2080 fn [<$tname _find_prev_non_zero_value_arbitrary_start>]() {
2081 test_side_metadata($log_bits, |spec, data_addr, _meta_addr| {
2082 let max_value: $type = max_value($log_bits) as _;
2083
2084 for offset in 0..7usize {
2086 let test_data_addr = data_addr + offset;
2088 spec.store_atomic::<$type>(test_data_addr, max_value, Ordering::SeqCst);
2089
2090 let res_addr = unsafe { spec.find_prev_non_zero_value::<$type>(test_data_addr, 4096) };
2092 assert!(res_addr.is_some());
2093 assert_eq!(res_addr.unwrap(), data_addr);
2094
2095 spec.store_atomic::<$type>(test_data_addr, 0, Ordering::SeqCst);
2097 }
2098 });
2099 }
2100
2101 #[test]
2102 fn [<$tname _find_prev_non_zero_value_no_find>]() {
2103 test_side_metadata($log_bits, |spec, data_addr, _meta_addr| {
2104 spec.store_atomic::<$type>(data_addr, 0, Ordering::SeqCst);
2106
2107 let test_region = (1 << TEST_LOG_BYTES_IN_REGION);
2109 for len in 1..(test_region*4) {
2110 let start_addr = data_addr + len;
2111 let res_addr = unsafe { spec.find_prev_non_zero_value::<$type>(start_addr, len + 1) };
2113 assert!(res_addr.is_none());
2114 }
2115 });
2116 }
2117 }
2118 }
2119 }
2120
2121 test_side_metadata_access!(test_u1, u8, 0);
2122 test_side_metadata_access!(test_u2, u8, 1);
2123 test_side_metadata_access!(test_u4, u8, 2);
2124 test_side_metadata_access!(test_u8, u8, 3);
2125 test_side_metadata_access!(test_u16, u16, 4);
2126 test_side_metadata_access!(test_u32, u32, 5);
2127 test_side_metadata_access!(test_u64, u64, 6);
2128 test_side_metadata_access!(
2129 test_usize,
2130 usize,
2131 if cfg!(target_pointer_width = "64") {
2132 6
2133 } else if cfg!(target_pointer_width = "32") {
2134 5
2135 } else {
2136 unreachable!()
2137 }
2138 );
2139
2140 #[test]
2141 fn test_bulk_update_meta_bits() {
2142 let raw_mem =
2143 unsafe { std::alloc::alloc_zeroed(std::alloc::Layout::from_size_align(8, 8).unwrap()) };
2144 let addr = Address::from_mut_ptr(raw_mem);
2145
2146 SideMetadataSpec::set_meta_bits(addr, 0, addr, 4);
2147 assert_eq!(unsafe { addr.load::<u64>() }, 0b1111);
2148
2149 SideMetadataSpec::zero_meta_bits(addr, 1, addr, 3);
2150 assert_eq!(unsafe { addr.load::<u64>() }, 0b1001);
2151
2152 SideMetadataSpec::set_meta_bits(addr, 2, addr, 6);
2153 assert_eq!(unsafe { addr.load::<u64>() }, 0b0011_1101);
2154
2155 SideMetadataSpec::zero_meta_bits(addr, 0, addr + 1usize, 0);
2156 assert_eq!(unsafe { addr.load::<u64>() }, 0b0);
2157
2158 SideMetadataSpec::set_meta_bits(addr, 2, addr + 1usize, 2);
2159 assert_eq!(unsafe { addr.load::<u64>() }, 0b11_1111_1100);
2160
2161 SideMetadataSpec::set_meta_bits(addr, 0, addr + 1usize, 2);
2162 assert_eq!(unsafe { addr.load::<u64>() }, 0b11_1111_1111);
2163 }
2164}