1use super::*;
2use crate::util::constants::{BYTES_IN_PAGE, BYTES_IN_WORD, LOG_BITS_IN_BYTE};
3use crate::util::conversions::raw_align_up;
4use crate::util::heap::layout::vm_layout::BYTES_IN_CHUNK;
5use crate::util::metadata::metadata_val_traits::*;
6#[cfg(feature = "vo_bit")]
7use crate::util::metadata::vo_bit::VO_BIT_SIDE_METADATA_SPEC;
8use crate::util::os::*;
9use crate::util::Address;
10use num_traits::FromPrimitive;
11use ranges::BitByteRange;
12use std::fmt;
13use std::io::Result;
14use std::sync::atomic::{AtomicU8, Ordering};
15
/// Describes one piece of side metadata: its name, whether it is global or
/// policy-specific, where its storage starts, and its granularity.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
pub struct SideMetadataSpec {
    /// Name of this metadata, used in debug output and mmap annotations.
    pub name: &'static str,
    /// True for global metadata (always contiguous); false for policy-specific
    /// metadata (laid out per chunk on 32-bit targets).
    pub is_global: bool,
    /// Where the metadata starts: an absolute address for contiguous metadata,
    /// or a chunk-relative offset (see `SideMetadataOffset`).
    pub offset: SideMetadataOffset,
    /// Log2 of the number of metadata bits per data region.
    pub log_num_of_bits: usize,
    /// Log2 of the number of data bytes covered by one metadata unit.
    pub log_bytes_in_region: usize,
}
36
37impl SideMetadataSpec {
38 pub const fn uses_contiguous_side_metadata(&self) -> bool {
40 self.is_global || cfg!(target_pointer_width = "64")
41 }
42
    /// True if `offset` holds an absolute address (`offset.addr`).
    /// This is the case exactly when the metadata is contiguous.
    pub const fn is_absolute_offset(&self) -> bool {
        self.uses_contiguous_side_metadata()
    }
47
    /// True if `offset` holds a chunk-relative offset (`offset.rel_offset`).
    pub const fn is_rel_offset(&self) -> bool {
        !self.is_absolute_offset()
    }
52
    /// Returns the absolute starting address of contiguous metadata.
    /// Only meaningful when `is_absolute_offset()` holds (checked in debug builds).
    pub const fn get_absolute_offset(&self) -> Address {
        debug_assert!(self.is_absolute_offset());
        // The assertion above establishes that `addr` is the active union field.
        unsafe { self.offset.addr }
    }
58
    /// Returns the chunk-relative starting offset of chunked metadata.
    /// Only meaningful when `is_rel_offset()` holds (checked in debug builds).
    pub const fn get_rel_offset(&self) -> usize {
        debug_assert!(self.is_rel_offset());
        // The assertion above establishes that `rel_offset` is the active union field.
        unsafe { self.offset.rel_offset }
    }
64
    /// Returns the exclusive upper bound of this spec's metadata, as an offset.
    /// On 64-bit targets all metadata is contiguous, so the result is always an
    /// absolute address: the start address plus the total metadata range size.
    #[cfg(target_pointer_width = "64")]
    pub const fn upper_bound_offset(&self) -> SideMetadataOffset {
        debug_assert!(self.is_absolute_offset());
        SideMetadataOffset {
            addr: unsafe { self.offset.addr }
                .add(crate::util::metadata::side_metadata::metadata_address_range_size(self)),
        }
    }
74
    /// Returns the exclusive upper bound of this spec's metadata, as an offset.
    /// On 32-bit targets: an absolute address for contiguous (global) metadata,
    /// or a per-chunk relative offset for chunked (local) metadata.
    #[cfg(target_pointer_width = "32")]
    pub const fn upper_bound_offset(&self) -> SideMetadataOffset {
        if self.is_absolute_offset() {
            // Contiguous: start address plus the total metadata range size.
            SideMetadataOffset {
                addr: unsafe { self.offset.addr }
                    .add(crate::util::metadata::side_metadata::metadata_address_range_size(self)),
            }
        } else {
            // Chunked: start offset plus the metadata bytes needed per chunk.
            SideMetadataOffset {
                rel_offset: unsafe { self.offset.rel_offset }
                    + crate::util::metadata::side_metadata::metadata_bytes_per_chunk(
                        self.log_bytes_in_region,
                        self.log_num_of_bits,
                    ),
            }
        }
    }
93
    /// Exclusive upper bound address of contiguous metadata.
    /// Only valid for contiguous specs (checked in debug builds).
    pub const fn upper_bound_address_for_contiguous(&self) -> Address {
        debug_assert!(self.is_absolute_offset());
        unsafe { self.upper_bound_offset().addr }
    }
102
    /// Exclusive upper bound address of this spec's metadata within the chunk
    /// containing `data_addr`. 32-bit chunked (local) metadata only.
    #[cfg(target_pointer_width = "32")]
    pub const fn upper_bound_address_for_chunked(&self, data_addr: Address) -> Address {
        debug_assert!(self.is_rel_offset());
        address_to_meta_chunk_addr(data_addr).add(unsafe { self.upper_bound_offset().rel_offset })
    }
112
    /// Debug-only check: panics if the metadata page covering `data_addr`'s
    /// metadata is not mapped.
    #[cfg(debug_assertions)]
    pub(crate) fn assert_metadata_mapped(&self, data_addr: Address) {
        // Check at page granularity: align the metadata address down to a page.
        let meta_start = address_to_meta_address(self, data_addr).align_down(BYTES_IN_PAGE);

        trace!(
            "ensure_metadata_is_mapped({}).meta_start({})",
            data_addr,
            meta_start
        );

        OS::panic_if_unmapped(meta_start, BYTES_IN_PAGE);
    }
127
128 #[cfg(debug_assertions)]
129 pub(crate) fn are_different_metadata_bits(&self, addr1: Address, addr2: Address) -> bool {
130 let a1 = address_to_meta_address(self, addr1);
131 let a2 = address_to_meta_address(self, addr2);
132 let s1 = meta_byte_lshift(self, addr1);
133 let s2 = meta_byte_lshift(self, addr2);
134 (a1, s1) != (a2, s2)
135 }
136
    /// Debug-only check that the value type `T` a caller used matches this
    /// spec's bit width, and (for sub-byte specs) that `val` fits in the
    /// spec's bits.
    #[cfg(debug_assertions)]
    fn assert_value_type<T: MetadataValue>(&self, val: Option<T>) {
        let log_b = self.log_num_of_bits;
        match log_b {
            // Sub-byte metadata must be accessed as u8 (T::LOG2 == 3), and the
            // value must be representable in 2^log_b bits.
            _ if log_b < 3 => {
                assert_eq!(T::LOG2, 3);
                if let Some(v) = val {
                    assert!(
                        v.to_u8().unwrap() < (1 << (1 << log_b)),
                        "Input value {:?} is invalid for the spec {:?}",
                        v,
                        self
                    );
                }
            }
            // 8/16/32/64-bit metadata: T must be exactly that width.
            3..=6 => assert_eq!(T::LOG2, log_b as u32),
            _ => unreachable!("side metadata > {}-bits is not supported", 1 << log_b),
        }
    }
159
    /// Returns whether the metadata address for `data_addr` is currently mapped.
    pub(crate) fn is_mapped(&self, data_addr: Address) -> bool {
        use crate::MMAPPER;
        let meta_addr = address_to_meta_address(self, data_addr);
        MMAPPER.is_mapped_address(meta_addr)
    }
166
    /// Zeroes the metadata bits in the half-open range
    /// [(meta_start_addr, meta_start_bit), (meta_end_addr, meta_end_bit)).
    pub(crate) fn zero_meta_bits(
        meta_start_addr: Address,
        meta_start_bit: u8,
        meta_end_addr: Address,
        meta_end_bit: u8,
    ) {
        let mut visitor = |range| {
            match range {
                // Whole bytes: plain memset-to-zero.
                BitByteRange::Bytes { start, end } => {
                    crate::util::memory::zero(start, end - start);
                    false
                }
                // Partial byte: atomically clear bits [bit_start, bit_end),
                // preserving the bits outside the range.
                BitByteRange::BitsInByte {
                    addr,
                    bit_start,
                    bit_end,
                } => {
                    // Mask of the bits to KEEP: those at or above bit_end
                    // (checked_shl covers bit_end == 8) and those below bit_start.
                    let mask: u8 =
                        u8::MAX.checked_shl(bit_end as u32).unwrap_or(0) | !(u8::MAX << bit_start);
                    unsafe { addr.as_ref::<AtomicU8>() }.fetch_and(mask, Ordering::SeqCst);
                    false
                }
            }
        };
        ranges::break_bit_range(
            meta_start_addr,
            meta_start_bit,
            meta_end_addr,
            meta_end_bit,
            true,
            &mut visitor,
        );
    }
203
    /// Sets (to all-ones) the metadata bits in the half-open range
    /// [(meta_start_addr, meta_start_bit), (meta_end_addr, meta_end_bit)).
    pub(crate) fn set_meta_bits(
        meta_start_addr: Address,
        meta_start_bit: u8,
        meta_end_addr: Address,
        meta_end_bit: u8,
    ) {
        let mut visitor = |range| {
            match range {
                // Whole bytes: plain memset to 0xff.
                BitByteRange::Bytes { start, end } => {
                    crate::util::memory::set(start, 0xff, end - start);
                    false
                }
                // Partial byte: atomically set bits [bit_start, bit_end),
                // preserving the bits outside the range.
                BitByteRange::BitsInByte {
                    addr,
                    bit_start,
                    bit_end,
                } => {
                    // Mask of the bits to SET: below bit_end (checked_shl covers
                    // bit_end == 8) and at or above bit_start.
                    let mask: u8 = !(u8::MAX.checked_shl(bit_end as u32).unwrap_or(0))
                        & (u8::MAX << bit_start);
                    unsafe { addr.as_ref::<AtomicU8>() }.fetch_or(mask, Ordering::SeqCst);
                    false
                }
            }
        };
        ranges::break_bit_range(
            meta_start_addr,
            meta_start_bit,
            meta_end_addr,
            meta_end_bit,
            true,
            &mut visitor,
        );
    }
240
    /// Applies `update_meta_bits` to the metadata covering `size` bytes of
    /// data starting at `start`, handling both the contiguous layout and
    /// (on 32-bit targets) the per-chunk layout of local specs.
    ///
    /// `update_meta_bits` receives a metadata bit range as
    /// (start byte, start bit, end byte, end bit).
    pub(super) fn bulk_update_metadata(
        &self,
        start: Address,
        size: usize,
        update_meta_bits: &impl Fn(Address, u8, Address, u8),
    ) {
        // Contiguous metadata: the whole data range maps to one metadata range.
        let update_contiguous = |data_start: Address, data_bytes: usize| {
            if data_bytes == 0 {
                return;
            }
            let meta_start = address_to_meta_address(self, data_start);
            let meta_start_shift = meta_byte_lshift(self, data_start);
            let meta_end = address_to_meta_address(self, data_start + data_bytes);
            let meta_end_shift = meta_byte_lshift(self, data_start + data_bytes);
            update_meta_bits(meta_start, meta_start_shift, meta_end, meta_end_shift);
        };

        // Chunked (32-bit local) metadata: the data range must lie within a
        // single chunk, and the metadata end is computed from the bit count
        // rather than by translating the (possibly next-chunk) end address.
        #[cfg(target_pointer_width = "32")]
        let update_discontiguous = |data_start: Address, data_bytes: usize| {
            use crate::util::constants::BITS_IN_BYTE;
            if data_bytes == 0 {
                return;
            }
            debug_assert_eq!(
                data_start.align_down(BYTES_IN_CHUNK),
                (data_start + data_bytes - 1).align_down(BYTES_IN_CHUNK),
                "The data to be zeroed in discontiguous specs needs to be in the same chunk"
            );
            let meta_start = address_to_meta_address(self, data_start);
            let meta_start_shift = meta_byte_lshift(self, data_start);
            // Total metadata bits for this data range, split into whole bytes
            // plus a sub-byte remainder.
            let meta_total_bits = (data_bytes >> self.log_bytes_in_region) << self.log_num_of_bits;
            let meta_delta_bytes = meta_total_bits >> LOG_BITS_IN_BYTE;
            let meta_delta_bits: u8 = (meta_total_bits % BITS_IN_BYTE) as u8;
            // Carry the bit offset into the next byte if it overflows.
            let (meta_end, meta_end_shift) = {
                let mut end_addr = meta_start + meta_delta_bytes;
                let mut end_bit = meta_start_shift + meta_delta_bits;
                if end_bit >= BITS_IN_BYTE as u8 {
                    end_bit -= BITS_IN_BYTE as u8;
                    end_addr += 1usize;
                }
                (end_addr, end_bit)
            };

            update_meta_bits(meta_start, meta_start_shift, meta_end, meta_end_shift);
        };

        // Contiguous path: all specs on 64-bit, global specs on 32-bit.
        if cfg!(target_pointer_width = "64") || self.is_global {
            update_contiguous(start, size);
        }
        // Chunked path (32-bit local specs): split the range at chunk
        // boundaries and update each chunk's metadata separately.
        #[cfg(target_pointer_width = "32")]
        if !self.is_global {
            let chunk_num = ((start + size).align_down(BYTES_IN_CHUNK)
                - start.align_down(BYTES_IN_CHUNK))
                / BYTES_IN_CHUNK;
            if chunk_num == 0 {
                // Entirely within one chunk.
                update_discontiguous(start, size);
            } else {
                // Leading partial chunk.
                let second_data_chunk = start.align_up(BYTES_IN_CHUNK);
                update_discontiguous(start, second_data_chunk - start);
                // Trailing partial chunk.
                let last_data_chunk = (start + size).align_down(BYTES_IN_CHUNK);
                update_discontiguous(last_data_chunk, start + size - last_data_chunk);
                // Whole chunks in between.
                let mut next_data_chunk = second_data_chunk;

                while next_data_chunk != last_data_chunk {
                    update_discontiguous(next_data_chunk, BYTES_IN_CHUNK);
                    next_data_chunk += BYTES_IN_CHUNK;
                }
            }
        }
    }
329
    /// Bulk-zeroes the metadata covering `size` bytes of data starting at `start`.
    pub fn bzero_metadata(&self, start: Address, size: usize) {
        // Serialize with the sanity checker, and record the bulk zero.
        #[cfg(feature = "extreme_assertions")]
        let _lock = sanity::SANITY_LOCK.lock().unwrap();

        #[cfg(feature = "extreme_assertions")]
        sanity::verify_bzero(self, start, size);

        self.bulk_update_metadata(start, size, &Self::zero_meta_bits)
    }
348
    /// Bulk-sets (to all-ones) the metadata covering `size` bytes of data
    /// starting at `start`.
    pub fn bset_metadata(&self, start: Address, size: usize) {
        // Serialize with the sanity checker, and record the bulk set.
        #[cfg(feature = "extreme_assertions")]
        let _lock = sanity::SANITY_LOCK.lock().unwrap();

        #[cfg(feature = "extreme_assertions")]
        sanity::verify_bset(self, start, size);

        self.bulk_update_metadata(start, size, &Self::set_meta_bits)
    }
367
    /// Bulk-copies the metadata for [start, start + size) from `other`'s
    /// metadata into `self`'s. Both specs must share the same granularity and
    /// bit width (checked in debug builds).
    pub fn bcopy_metadata_contiguous(&self, start: Address, size: usize, other: &SideMetadataSpec) {
        #[cfg(feature = "extreme_assertions")]
        let _lock = sanity::SANITY_LOCK.lock().unwrap();

        #[cfg(feature = "extreme_assertions")]
        sanity::verify_bcopy(self, start, size, other);

        debug_assert_eq!(other.log_bytes_in_region, self.log_bytes_in_region);
        debug_assert_eq!(other.log_num_of_bits, self.log_num_of_bits);

        let dst_meta_start_addr = address_to_meta_address(self, start);
        let dst_meta_start_bit = meta_byte_lshift(self, start);
        let dst_meta_end_addr = address_to_meta_address(self, start + size);
        let dst_meta_end_bit = meta_byte_lshift(self, start + size);

        let src_meta_start_addr = address_to_meta_address(other, start);
        let src_meta_start_bit = meta_byte_lshift(other, start);

        // Same granularity implies the same bit position within the byte.
        debug_assert_eq!(dst_meta_start_bit, src_meta_start_bit);

        let mut visitor = |range| {
            match range {
                // Whole bytes: memcpy from the source metadata at the same offset.
                BitByteRange::Bytes {
                    start: dst_start,
                    end: dst_end,
                } => unsafe {
                    let byte_offset = dst_start - dst_meta_start_addr;
                    let src_start = src_meta_start_addr + byte_offset;
                    let size = dst_end - dst_start;
                    std::ptr::copy::<u8>(src_start.to_ptr(), dst_start.to_mut_ptr(), size);
                    false
                },
                // Partial byte: splice bits [bit_start, bit_end) from the source
                // byte into the destination byte, keeping dst's other bits.
                BitByteRange::BitsInByte {
                    addr: dst,
                    bit_start,
                    bit_end,
                } => {
                    let byte_offset = dst - dst_meta_start_addr;
                    let src = src_meta_start_addr + byte_offset;
                    // Mask of the bits to copy.
                    let mask: u8 = !(u8::MAX.checked_shl(bit_end as u32).unwrap_or(0))
                        & (u8::MAX << bit_start);
                    let old_src = unsafe { src.as_ref::<AtomicU8>() }.load(Ordering::Relaxed);
                    let old_dst = unsafe { dst.as_ref::<AtomicU8>() }.load(Ordering::Relaxed);
                    let new = (old_src & mask) | (old_dst & !mask);
                    unsafe { dst.as_ref::<AtomicU8>() }.store(new, Ordering::Relaxed);
                    false
                }
            }
        };

        ranges::break_bit_range(
            dst_meta_start_addr,
            dst_meta_start_bit,
            dst_meta_end_addr,
            dst_meta_end_bit,
            true,
            &mut visitor,
        );
    }
440
    /// Common wrapper around every metadata access: takes the sanity lock and
    /// runs value/mapping checks (depending on build features and
    /// `CHECK_VALUE`) around `access_func`, then passes the result to
    /// `verify_func` so the sanity checker can record it.
    #[allow(unused_variables)]
    fn side_metadata_access<
        const CHECK_VALUE: bool,
        T: MetadataValue,
        R: Copy,
        F: FnOnce() -> R,
        V: FnOnce(R),
    >(
        &self,
        data_addr: Address,
        input: Option<T>,
        access_func: F,
        verify_func: V,
    ) -> R {
        // Serialize accesses while extreme assertions verify metadata state.
        #[cfg(feature = "extreme_assertions")]
        let _lock = sanity::SANITY_LOCK.lock().unwrap();

        #[cfg(debug_assertions)]
        {
            if CHECK_VALUE {
                self.assert_value_type::<T>(input);
            }
            #[cfg(feature = "extreme_assertions")]
            self.assert_metadata_mapped(data_addr);
        }

        // The actual metadata access.
        let ret = access_func();

        if CHECK_VALUE {
            verify_func(ret);
        }

        ret
    }
489
    /// Non-atomically loads the metadata value for `data_addr`.
    ///
    /// # Safety
    /// This is a plain (non-atomic) load; the caller must ensure no other
    /// thread writes this metadata location concurrently.
    pub unsafe fn load<T: MetadataValue>(&self, data_addr: Address) -> T {
        self.side_metadata_access::<true, T, _, _, _>(
            data_addr,
            None,
            || {
                let meta_addr = address_to_meta_address(self, data_addr);
                let bits_num_log = self.log_num_of_bits;
                if bits_num_log < 3 {
                    // Sub-byte metadata: extract this region's field from its byte.
                    let lshift = meta_byte_lshift(self, data_addr);
                    let mask = meta_byte_mask(self) << lshift;
                    let byte_val = meta_addr.load::<u8>();

                    FromPrimitive::from_u8((byte_val & mask) >> lshift).unwrap()
                } else {
                    meta_addr.load::<T>()
                }
            },
            |_v| {
                #[cfg(feature = "extreme_assertions")]
                sanity::verify_load(self, data_addr, _v);
            },
        )
    }
521
    /// Non-atomically stores `metadata` as the value for `data_addr`.
    ///
    /// # Safety
    /// This is a plain (non-atomic) read-modify-write for sub-byte specs; the
    /// caller must ensure no other thread accesses this metadata byte
    /// concurrently.
    pub unsafe fn store<T: MetadataValue>(&self, data_addr: Address, metadata: T) {
        self.side_metadata_access::<true, T, _, _, _>(
            data_addr,
            Some(metadata),
            || {
                let meta_addr = address_to_meta_address(self, data_addr);
                let bits_num_log = self.log_num_of_bits;
                if bits_num_log < 3 {
                    // Sub-byte metadata: splice the new field into the byte,
                    // preserving neighboring regions' bits.
                    let lshift = meta_byte_lshift(self, data_addr);
                    let mask = meta_byte_mask(self) << lshift;
                    let old_val = meta_addr.load::<u8>();
                    let new_val = (old_val & !mask) | (metadata.to_u8().unwrap() << lshift);

                    meta_addr.store::<u8>(new_val);
                } else {
                    meta_addr.store::<T>(metadata);
                }
            },
            |_| {
                #[cfg(feature = "extreme_assertions")]
                sanity::verify_store(self, data_addr, metadata);
            },
        )
    }
554
    /// Atomically loads the metadata value for `data_addr` with the given
    /// memory ordering.
    pub fn load_atomic<T: MetadataValue>(&self, data_addr: Address, order: Ordering) -> T {
        self.side_metadata_access::<true, T, _, _, _>(
            data_addr,
            None,
            || {
                let meta_addr = address_to_meta_address(self, data_addr);
                let bits_num_log = self.log_num_of_bits;
                if bits_num_log < 3 {
                    // Sub-byte metadata: atomically load the byte, then extract
                    // this region's field.
                    let lshift = meta_byte_lshift(self, data_addr);
                    let mask = meta_byte_mask(self) << lshift;
                    let byte_val = unsafe { meta_addr.atomic_load::<AtomicU8>(order) };
                    FromPrimitive::from_u8((byte_val & mask) >> lshift).unwrap()
                } else {
                    unsafe { T::load_atomic(meta_addr, order) }
                }
            },
            |_v| {
                #[cfg(feature = "extreme_assertions")]
                sanity::verify_load(self, data_addr, _v);
            },
        )
    }
579
    /// Atomically stores `metadata` as the value for `data_addr` with the
    /// given memory ordering.
    pub fn store_atomic<T: MetadataValue>(&self, data_addr: Address, metadata: T, order: Ordering) {
        self.side_metadata_access::<true, T, _, _, _>(
            data_addr,
            Some(metadata),
            || {
                let meta_addr = address_to_meta_address(self, data_addr);
                let bits_num_log = self.log_num_of_bits;
                if bits_num_log < 3 {
                    // Sub-byte metadata: an atomic RMW is required to splice the
                    // field into the byte without clobbering neighboring bits.
                    let lshift = meta_byte_lshift(self, data_addr);
                    let mask = meta_byte_mask(self) << lshift;
                    let metadata_u8 = metadata.to_u8().unwrap();
                    let _ = unsafe {
                        <u8 as MetadataValue>::fetch_update(meta_addr, order, order, |v: u8| {
                            Some((v & !mask) | (metadata_u8 << lshift))
                        })
                    };
                } else {
                    unsafe {
                        T::store_atomic(meta_addr, metadata, order);
                    }
                }
            },
            |_| {
                #[cfg(feature = "extreme_assertions")]
                sanity::verify_store(self, data_addr, metadata);
            },
        )
    }
610
    /// Non-atomically zeroes the metadata for one region, dispatching on the
    /// spec's bit width to the correctly-sized store.
    ///
    /// # Safety
    /// Same as `store`: callers must avoid concurrent accesses to this
    /// metadata location.
    pub unsafe fn set_zero(&self, data_addr: Address) {
        use num_traits::Zero;
        match self.log_num_of_bits {
            0..=3 => self.store(data_addr, u8::zero()),
            4 => self.store(data_addr, u16::zero()),
            5 => self.store(data_addr, u32::zero()),
            6 => self.store(data_addr, u64::zero()),
            _ => unreachable!(),
        }
    }
630
    /// Atomically zeroes the metadata for one region, dispatching on the
    /// spec's bit width to the correctly-sized atomic store.
    pub fn set_zero_atomic(&self, data_addr: Address, order: Ordering) {
        use num_traits::Zero;
        match self.log_num_of_bits {
            0..=3 => self.store_atomic(data_addr, u8::zero(), order),
            4 => self.store_atomic(data_addr, u16::zero(), order),
            5 => self.store_atomic(data_addr, u32::zero(), order),
            6 => self.store_atomic(data_addr, u64::zero(), order),
            _ => unreachable!(),
        }
    }
643
    /// Atomically sets the whole metadata byte containing `data_addr`'s bits
    /// to 0xff. Sub-byte specs only (checked in debug builds).
    ///
    /// # Safety
    /// This writes the bits of every region sharing the byte; the caller must
    /// ensure that setting neighboring regions' bits is acceptable.
    pub unsafe fn set_raw_byte_atomic(&self, data_addr: Address, order: Ordering) {
        debug_assert!(self.log_num_of_bits < 3);
        cfg_if::cfg_if! {
            if #[cfg(feature = "extreme_assertions")] {
                // Under extreme assertions, do a value-checked store of 1 for
                // this region only, so the sanity table stays consistent.
                self.store_atomic::<u8>(data_addr, 1, order)
            } else {
                self.side_metadata_access::<false, u8, _, _, _>(
                    data_addr,
                    Some(1u8),
                    || {
                        let meta_addr = address_to_meta_address(self, data_addr);
                        u8::store_atomic(meta_addr, 0xffu8, order);
                    },
                    |_| {}
                )
            }
        }
    }
673
    /// Loads the entire metadata byte containing `data_addr`'s bits, without
    /// masking out neighboring regions' bits. Sub-byte specs only.
    ///
    /// # Safety
    /// Non-atomic load; callers must avoid concurrent writers.
    pub unsafe fn load_raw_byte(&self, data_addr: Address) -> u8 {
        debug_assert!(self.log_num_of_bits < 3);
        self.side_metadata_access::<false, u8, _, _, _>(
            data_addr,
            None,
            || {
                let meta_addr = address_to_meta_address(self, data_addr);
                meta_addr.load::<u8>()
            },
            |_| {},
        )
    }
693
    /// Loads the whole word containing `data_addr`'s metadata bits, without
    /// masking. The metadata address is aligned DOWN to a word boundary, so
    /// the requested bits may lie anywhere within the returned word.
    ///
    /// # Safety
    /// Non-atomic load; callers must avoid concurrent writers.
    pub unsafe fn load_raw_word(&self, data_addr: Address) -> usize {
        use crate::util::constants::*;
        // The spec must be narrower than a word for a raw-word load to make sense.
        debug_assert!(self.log_num_of_bits < (LOG_BITS_IN_BYTE + LOG_BYTES_IN_ADDRESS) as usize);
        self.side_metadata_access::<false, usize, _, _, _>(
            data_addr,
            None,
            || {
                let meta_addr = address_to_meta_address(self, data_addr);
                let aligned_meta_addr = meta_addr.align_down(BYTES_IN_ADDRESS);
                aligned_meta_addr.load::<usize>()
            },
            |_| {},
        )
    }
715
    /// Atomically compare-exchanges the metadata for `data_addr` from
    /// `old_metadata` to `new_metadata`. Returns `Ok(old)` on success and
    /// `Err(actual)` on failure, like `AtomicUsize::compare_exchange`.
    pub fn compare_exchange_atomic<T: MetadataValue>(
        &self,
        data_addr: Address,
        old_metadata: T,
        new_metadata: T,
        success_order: Ordering,
        failure_order: Ordering,
    ) -> std::result::Result<T, T> {
        self.side_metadata_access::<true, T, _, _, _>(
            data_addr,
            Some(new_metadata),
            || {
                let meta_addr = address_to_meta_address(self, data_addr);
                let bits_num_log = self.log_num_of_bits;
                if bits_num_log < 3 {
                    // Sub-byte metadata: the CAS operates on the whole byte, so
                    // read the current byte and splice the expected old/new
                    // field values into it. If an unrelated bit in the same
                    // byte changes between the load and the CAS, the CAS fails.
                    let lshift = meta_byte_lshift(self, data_addr);
                    let mask = meta_byte_mask(self) << lshift;

                    let real_old_byte = unsafe { meta_addr.atomic_load::<AtomicU8>(success_order) };
                    let expected_old_byte =
                        (real_old_byte & !mask) | ((old_metadata.to_u8().unwrap()) << lshift);
                    let expected_new_byte =
                        (expected_old_byte & !mask) | ((new_metadata.to_u8().unwrap()) << lshift);

                    unsafe {
                        meta_addr.compare_exchange::<AtomicU8>(
                            expected_old_byte,
                            expected_new_byte,
                            success_order,
                            failure_order,
                        )
                    }
                    // Map the raw byte result back to this region's field value.
                    .map(|x| FromPrimitive::from_u8((x & mask) >> lshift).unwrap())
                    .map_err(|x| FromPrimitive::from_u8((x & mask) >> lshift).unwrap())
                } else {
                    unsafe {
                        T::compare_exchange(
                            meta_addr,
                            old_metadata,
                            new_metadata,
                            success_order,
                            failure_order,
                        )
                    }
                }
            },
            |_res| {
                #[cfg(feature = "extreme_assertions")]
                if _res.is_ok() {
                    sanity::verify_store(self, data_addr, new_metadata);
                }
            },
        )
    }
774
    /// Shared implementation for atomic fetch-and-modify on sub-byte metadata:
    /// retries `update` on this region's field via a byte-level fetch_update,
    /// and returns the OLD field value.
    fn fetch_ops_on_bits<F: Fn(u8) -> u8>(
        &self,
        data_addr: Address,
        meta_addr: Address,
        set_order: Ordering,
        fetch_order: Ordering,
        update: F,
    ) -> u8 {
        let lshift = meta_byte_lshift(self, data_addr);
        let mask = meta_byte_mask(self) << lshift;

        let old_raw_byte = unsafe {
            <u8 as MetadataValue>::fetch_update(
                meta_addr,
                set_order,
                fetch_order,
                |raw_byte: u8| {
                    // Extract the field, apply the update, splice it back in.
                    // Masking the shifted new value keeps neighbors untouched.
                    let old_val = (raw_byte & mask) >> lshift;
                    let new_val = update(old_val);
                    let new_raw_byte = (raw_byte & !mask) | ((new_val << lshift) & mask);
                    Some(new_raw_byte)
                },
            )
        }
        // The closure always returns Some, so fetch_update cannot fail.
        .unwrap();
        (old_raw_byte & mask) >> lshift
    }
804
    /// Atomically adds `val` to the metadata for `data_addr`, returning the
    /// old value. Sub-byte fields wrap within their bit width (the result is
    /// masked back into the field).
    pub fn fetch_add_atomic<T: MetadataValue>(
        &self,
        data_addr: Address,
        val: T,
        order: Ordering,
    ) -> T {
        self.side_metadata_access::<true, T, _, _, _>(
            data_addr,
            Some(val),
            || {
                let meta_addr = address_to_meta_address(self, data_addr);
                let bits_num_log = self.log_num_of_bits;
                if bits_num_log < 3 {
                    FromPrimitive::from_u8(self.fetch_ops_on_bits(
                        data_addr,
                        meta_addr,
                        order,
                        order,
                        |x: u8| x.wrapping_add(val.to_u8().unwrap()),
                    ))
                    .unwrap()
                } else {
                    unsafe { T::fetch_add(meta_addr, val, order) }
                }
            },
            |_old_val| {
                #[cfg(feature = "extreme_assertions")]
                sanity::verify_update::<T>(self, data_addr, _old_val, _old_val.wrapping_add(&val))
            },
        )
    }
839
    /// Atomically subtracts `val` from the metadata for `data_addr`, returning
    /// the old value. Sub-byte fields wrap within their bit width.
    pub fn fetch_sub_atomic<T: MetadataValue>(
        &self,
        data_addr: Address,
        val: T,
        order: Ordering,
    ) -> T {
        self.side_metadata_access::<true, T, _, _, _>(
            data_addr,
            Some(val),
            || {
                let meta_addr = address_to_meta_address(self, data_addr);
                if self.log_num_of_bits < 3 {
                    FromPrimitive::from_u8(self.fetch_ops_on_bits(
                        data_addr,
                        meta_addr,
                        order,
                        order,
                        |x: u8| x.wrapping_sub(val.to_u8().unwrap()),
                    ))
                    .unwrap()
                } else {
                    unsafe { T::fetch_sub(meta_addr, val, order) }
                }
            },
            |_old_val| {
                #[cfg(feature = "extreme_assertions")]
                sanity::verify_update::<T>(self, data_addr, _old_val, _old_val.wrapping_sub(&val))
            },
        )
    }
873
    /// Atomically bitwise-ANDs `val` into the metadata for `data_addr`,
    /// returning the old value.
    pub fn fetch_and_atomic<T: MetadataValue>(
        &self,
        data_addr: Address,
        val: T,
        order: Ordering,
    ) -> T {
        self.side_metadata_access::<true, T, _, _, _>(
            data_addr,
            Some(val),
            || {
                let meta_addr = address_to_meta_address(self, data_addr);
                if self.log_num_of_bits < 3 {
                    let lshift = meta_byte_lshift(self, data_addr);
                    let mask = meta_byte_mask(self) << lshift;
                    // Bits outside this region's field are 1 in `rhs`, so a
                    // single byte-wide fetch_and leaves neighbors unchanged.
                    let rhs = (val.to_u8().unwrap() << lshift) | !mask;
                    let old_raw_byte =
                        unsafe { <u8 as MetadataValue>::fetch_and(meta_addr, rhs, order) };
                    let old_val = (old_raw_byte & mask) >> lshift;
                    FromPrimitive::from_u8(old_val).unwrap()
                } else {
                    unsafe { T::fetch_and(meta_addr, val, order) }
                }
            },
            |_old_val| {
                #[cfg(feature = "extreme_assertions")]
                sanity::verify_update::<T>(self, data_addr, _old_val, _old_val.bitand(val))
            },
        )
    }
907
    /// Atomically bitwise-ORs `val` into the metadata for `data_addr`,
    /// returning the old value.
    pub fn fetch_or_atomic<T: MetadataValue>(
        &self,
        data_addr: Address,
        val: T,
        order: Ordering,
    ) -> T {
        self.side_metadata_access::<true, T, _, _, _>(
            data_addr,
            Some(val),
            || {
                let meta_addr = address_to_meta_address(self, data_addr);
                if self.log_num_of_bits < 3 {
                    let lshift = meta_byte_lshift(self, data_addr);
                    let mask = meta_byte_mask(self) << lshift;
                    // Bits outside this region's field are 0 in `rhs`, so a
                    // single byte-wide fetch_or leaves neighbors unchanged.
                    let rhs = (val.to_u8().unwrap() << lshift) & mask;
                    let old_raw_byte =
                        unsafe { <u8 as MetadataValue>::fetch_or(meta_addr, rhs, order) };
                    let old_val = (old_raw_byte & mask) >> lshift;
                    FromPrimitive::from_u8(old_val).unwrap()
                } else {
                    unsafe { T::fetch_or(meta_addr, val, order) }
                }
            },
            |_old_val| {
                #[cfg(feature = "extreme_assertions")]
                sanity::verify_update::<T>(self, data_addr, _old_val, _old_val.bitor(val))
            },
        )
    }
941
    /// Atomically applies `f` to the metadata for `data_addr`, retrying until
    /// the update succeeds or `f` returns `None`. Returns `Ok(old)` on
    /// success, `Err(current)` if `f` declined to update.
    pub fn fetch_update_atomic<T: MetadataValue, F: FnMut(T) -> Option<T> + Copy>(
        &self,
        data_addr: Address,
        set_order: Ordering,
        fetch_order: Ordering,
        mut f: F,
    ) -> std::result::Result<T, T> {
        self.side_metadata_access::<true, T, _, _, _>(
            data_addr,
            None,
            move || -> std::result::Result<T, T> {
                let meta_addr = address_to_meta_address(self, data_addr);
                if self.log_num_of_bits < 3 {
                    let lshift = meta_byte_lshift(self, data_addr);
                    let mask = meta_byte_mask(self) << lshift;

                    unsafe {
                        <u8 as MetadataValue>::fetch_update(
                            meta_addr,
                            set_order,
                            fetch_order,
                            |raw_byte: u8| {
                                // Extract this region's field, let `f` compute the
                                // new value, and splice it back into the byte.
                                let old_val = (raw_byte & mask) >> lshift;
                                f(FromPrimitive::from_u8(old_val).unwrap()).map(|new_val| {
                                    (raw_byte & !mask)
                                        | ((new_val.to_u8().unwrap() << lshift) & mask)
                                })
                            },
                        )
                    }
                    // Map the raw byte result back to this region's field value.
                    .map(|x| FromPrimitive::from_u8((x & mask) >> lshift).unwrap())
                    .map_err(|x| FromPrimitive::from_u8((x & mask) >> lshift).unwrap())
                } else {
                    unsafe { T::fetch_update(meta_addr, set_order, fetch_order, f) }
                }
            },
            |_result| {
                #[cfg(feature = "extreme_assertions")]
                if let Ok(old_val) = _result {
                    sanity::verify_update::<T>(self, data_addr, old_val, f(old_val).unwrap())
                }
            },
        )
    }
989
990 #[allow(clippy::let_and_return)]
1004 pub unsafe fn find_prev_non_zero_value<T: MetadataValue>(
1005 &self,
1006 data_addr: Address,
1007 search_limit_bytes: usize,
1008 ) -> Option<Address> {
1009 debug_assert!(search_limit_bytes > 0);
1010
1011 if self.uses_contiguous_side_metadata() {
1012 let result = self.find_prev_non_zero_value_fast::<T>(data_addr, search_limit_bytes);
1014 #[cfg(debug_assertions)]
1015 {
1016 let result2 =
1018 self.find_prev_non_zero_value_simple::<T>(data_addr, search_limit_bytes);
1019 assert_eq!(result, result2, "find_prev_non_zero_value_fast returned a diffrent result from the naive implementation.");
1020 }
1021 result
1022 } else {
1023 warn!("We are trying to search non zero bits in an discontiguous side metadata. The performance is slow, as MMTk does not optimize for this case.");
1027 self.find_prev_non_zero_value_simple::<T>(data_addr, search_limit_bytes)
1028 }
1029 }
1030
    /// Naive backwards search: walks region by region downwards from
    /// `data_addr`, loading each region's metadata until a non-zero value is
    /// found or the search limit is exhausted.
    fn find_prev_non_zero_value_simple<T: MetadataValue>(
        &self,
        data_addr: Address,
        search_limit_bytes: usize,
    ) -> Option<Address> {
        let region_bytes = 1 << self.log_bytes_in_region;
        // Start at the region containing data_addr; the inclusive lower bound
        // is the lowest address still within the search limit.
        let start_addr = data_addr.align_down(region_bytes);
        let end_addr = data_addr.saturating_sub(search_limit_bytes) + 1usize;

        let mut cursor = start_addr;
        while cursor >= end_addr {
            // An unmapped data address terminates the search.
            if !cursor.is_mapped() {
                return None;
            }
            if !unsafe { self.load::<T>(cursor).is_zero() } {
                return Some(cursor);
            }
            cursor -= region_bytes;
        }
        None
    }
1055
    /// Fast backwards search over CONTIGUOUS metadata: translates the data
    /// range into a metadata bit range and scans the metadata bytes/bits
    /// directly for the last non-zero bit.
    #[allow(clippy::let_and_return)]
    fn find_prev_non_zero_value_fast<T: MetadataValue>(
        &self,
        data_addr: Address,
        search_limit_bytes: usize,
    ) -> Option<Address> {
        debug_assert!(self.uses_contiguous_side_metadata());

        // Nothing to search if the data address itself is unmapped.
        if !data_addr.is_mapped() {
            return None;
        }
        // Fast exit: the region containing data_addr is itself non-zero.
        if !unsafe { self.load::<T>(data_addr).is_zero() } {
            return Some(data_addr.align_down(1 << self.log_bytes_in_region));
        }

        // Search the half-open data range [start_addr, end_addr).
        let start_addr = data_addr.saturating_sub(search_limit_bytes) + 1usize;
        let end_addr = data_addr;

        let start_meta_addr = address_to_contiguous_meta_address(self, start_addr);
        let start_meta_shift = meta_byte_lshift(self, start_addr);
        let end_meta_addr = address_to_contiguous_meta_address(self, end_addr);
        let end_meta_shift = meta_byte_lshift(self, end_addr);

        let mut res = None;

        // The visitor returns true to stop the scan early (found / unmapped),
        // false to continue to the next sub-range.
        let mut visitor = |range: BitByteRange| {
            match range {
                BitByteRange::Bytes { start, end } => {
                    match helpers::find_last_non_zero_bit_in_metadata_bytes(start, end) {
                        helpers::FindMetaBitResult::Found { addr, bit } => {
                            // Align the found bit to the start of its metadata
                            // value before translating back to a data address.
                            let (addr, bit) = align_metadata_address(self, addr, bit);
                            res = Some(contiguous_meta_address_to_address(self, addr, bit));
                            true
                        }
                        helpers::FindMetaBitResult::UnmappedMetadata => true,
                        helpers::FindMetaBitResult::NotFound => false,
                    }
                }
                BitByteRange::BitsInByte {
                    addr,
                    bit_start,
                    bit_end,
                } => {
                    match helpers::find_last_non_zero_bit_in_metadata_bits(addr, bit_start, bit_end)
                    {
                        helpers::FindMetaBitResult::Found { addr, bit } => {
                            let (addr, bit) = align_metadata_address(self, addr, bit);
                            res = Some(contiguous_meta_address_to_address(self, addr, bit));
                            true
                        }
                        helpers::FindMetaBitResult::UnmappedMetadata => true,
                        helpers::FindMetaBitResult::NotFound => false,
                    }
                }
            }
        };

        // `false`: scan the range from high addresses to low.
        ranges::break_bit_range(
            start_meta_addr,
            start_meta_shift,
            end_meta_addr,
            end_meta_shift,
            false,
            &mut visitor,
        );

        // Align the result to a region boundary and discard hits outside the
        // searched data range.
        res.map(|addr| addr.align_down(1 << self.log_bytes_in_region))
            .filter(|addr| *addr >= start_addr && *addr < end_addr)
    }
1141
    /// Visits every region in [data_start_addr, data_end_addr) whose metadata
    /// value is non-zero, calling `visit_data` with the region's address.
    pub fn scan_non_zero_values<T: MetadataValue>(
        &self,
        data_start_addr: Address,
        data_end_addr: Address,
        visit_data: &mut impl FnMut(Address),
    ) {
        // The fast path only handles contiguous, 1-bit-per-region metadata.
        if self.uses_contiguous_side_metadata() && self.log_num_of_bits == 0 {
            self.scan_non_zero_values_fast(data_start_addr, data_end_addr, visit_data);
        } else {
            warn!(
                "We are trying to search for non zero bits in a discontiguous side metadata \
                or the metadata has more than one bit per region. \
                The performance is slow, as MMTk does not optimize for this case."
            );
            self.scan_non_zero_values_simple::<T>(data_start_addr, data_end_addr, visit_data);
        }
    }
1176
    /// Naive scan: loads every region's metadata value one at a time and
    /// reports each non-zero region.
    fn scan_non_zero_values_simple<T: MetadataValue>(
        &self,
        data_start_addr: Address,
        data_end_addr: Address,
        visit_data: &mut impl FnMut(Address),
    ) {
        let region_bytes = 1usize << self.log_bytes_in_region;

        let mut cursor = data_start_addr;
        while cursor < data_end_addr {
            debug_assert!(cursor.is_mapped());

            if !unsafe { self.load::<T>(cursor).is_zero() } {
                visit_data(cursor);
            }
            cursor += region_bytes;
        }
    }
1196
    /// Fast scan over CONTIGUOUS, 1-bit-per-region metadata: translates the
    /// data range into a metadata bit range and scans for set bits directly,
    /// converting each back to a data address.
    fn scan_non_zero_values_fast(
        &self,
        data_start_addr: Address,
        data_end_addr: Address,
        visit_data: &mut impl FnMut(Address),
    ) {
        debug_assert!(self.uses_contiguous_side_metadata());
        debug_assert_eq!(self.log_num_of_bits, 0);

        let start_meta_addr = address_to_contiguous_meta_address(self, data_start_addr);
        let start_meta_shift = meta_byte_lshift(self, data_start_addr);
        let end_meta_addr = address_to_contiguous_meta_address(self, data_end_addr);
        let end_meta_shift = meta_byte_lshift(self, data_end_addr);

        // Returning false from the visitor means "keep scanning": the whole
        // range is always visited.
        let mut visitor = |range| {
            match range {
                BitByteRange::Bytes { start, end } => {
                    helpers::scan_non_zero_bits_in_metadata_bytes(start, end, &mut |addr, bit| {
                        visit_data(helpers::contiguous_meta_address_to_address(self, addr, bit));
                    });
                }
                BitByteRange::BitsInByte {
                    addr,
                    bit_start,
                    bit_end,
                } => helpers::scan_non_zero_bits_in_metadata_bits(
                    addr,
                    bit_start,
                    bit_end,
                    &mut |addr, bit| {
                        visit_data(helpers::contiguous_meta_address_to_address(self, addr, bit));
                    },
                ),
            }
            false
        };

        ranges::break_bit_range(
            start_meta_addr,
            start_meta_shift,
            end_meta_addr,
            end_meta_shift,
            true,
            &mut visitor,
        );
    }
1244}
1245
// Hand-written Debug: formats the union offset according to which variant is
// active, which `#[derive(Debug)]` cannot do for a union field.
impl fmt::Debug for SideMetadataSpec {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_fmt(format_args!(
            "SideMetadataSpec {} {{ \
            **is_global: {:?} \
            **offset: {} \
            **log_num_of_bits: 0x{:x} \
            **log_bytes_in_region: 0x{:x} \
            }}",
            self.name,
            self.is_global,
            unsafe {
                // Pick the union field that matches this spec's layout.
                if self.is_absolute_offset() {
                    format!("0x{:x}", self.offset.addr)
                } else {
                    format!("0x{:x}", self.offset.rel_offset)
                }
            },
            self.log_num_of_bits,
            self.log_bytes_in_region
        ))
    }
}
1269
/// The starting location of a side metadata: either an absolute address
/// (contiguous metadata) or an offset relative to a chunk's metadata area
/// (chunked metadata). Which field is active is determined by
/// `SideMetadataSpec::is_absolute_offset()`.
#[derive(Clone, Copy)]
pub union SideMetadataOffset {
    // Absolute start address of contiguous metadata.
    addr: Address,
    // Start offset within a chunk's metadata area.
    rel_offset: usize,
}
1278
impl SideMetadataOffset {
    /// Creates an absolute-address offset (contiguous metadata).
    pub const fn addr(addr: Address) -> Self {
        SideMetadataOffset { addr }
    }

    /// Creates a chunk-relative offset (chunked metadata).
    pub const fn rel(rel_offset: usize) -> Self {
        SideMetadataOffset { rel_offset }
    }

    /// Returns an offset placed immediately after `spec`'s metadata,
    /// word-aligned. Used to pack multiple specs back to back.
    pub const fn layout_after(spec: &SideMetadataSpec) -> SideMetadataOffset {
        let upper_bound_offset = spec.upper_bound_offset();
        if spec.is_absolute_offset() {
            // Absolute layout: align the end address up to a word boundary.
            let addr = unsafe { upper_bound_offset.addr };
            let aligned_addr = addr.align_up(BYTES_IN_WORD);
            SideMetadataOffset::addr(aligned_addr)
        } else {
            // Relative layout: align the end offset up to a word boundary.
            let rel_offset = unsafe { upper_bound_offset.rel_offset };
            let aligned_rel_offset = raw_align_up(rel_offset, BYTES_IN_WORD);
            SideMetadataOffset::rel(aligned_rel_offset)
        }
    }
}
1312
// Equality and hashing read `rel_offset` regardless of which variant is
// active, i.e. they compare/hash the union's raw word.
// NOTE(review): this assumes both union fields are word-sized with an
// identical bit representation (`Address` being a plain word) — confirm.
impl PartialEq for SideMetadataOffset {
    fn eq(&self, other: &Self) -> bool {
        unsafe { self.rel_offset == other.rel_offset }
    }
}
impl Eq for SideMetadataOffset {}

impl std::hash::Hash for SideMetadataOffset {
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        unsafe { self.rel_offset }.hash(state);
    }
}
1327
/// The set of side metadata specs in use: global specs plus the
/// policy-specific (local) specs.
pub(crate) struct SideMetadataContext {
    // Global side metadata specs.
    pub global: Vec<SideMetadataSpec>,
    // Policy-specific (local) side metadata specs.
    pub local: Vec<SideMetadataSpec>,
}
1336
1337impl SideMetadataContext {
    /// Builds the full list of global specs: the always-present core specs
    /// (the VO bit when enabled, the SFT map's spec when global, and the
    /// chunk-map alloc table) followed by the caller-provided `specs`.
    #[allow(clippy::vec_init_then_push)]
    pub fn new_global_specs(specs: &[SideMetadataSpec]) -> Vec<SideMetadataSpec> {
        let mut ret = vec![];

        #[cfg(feature = "vo_bit")]
        ret.push(VO_BIT_SIDE_METADATA_SPEC);

        // Only include the SFT map's spec if it is itself global.
        if let Some(spec) = crate::mmtk::SFT_MAP.get_side_metadata() {
            if spec.is_global {
                ret.push(*spec);
            }
        }

        ret.push(crate::util::heap::chunk_map::ChunkMap::ALLOC_TABLE);

        ret.extend_from_slice(specs);
        ret
    }
1359
1360 pub fn get_local_specs(&self) -> &[SideMetadataSpec] {
1361 &self.local
1362 }
1363
1364 pub fn calculate_reserved_pages(&self, data_pages: usize) -> usize {
1371 let mut total = 0;
1372 for spec in self.global.iter() {
1373 total += data_to_meta_size_round_up(spec, data_pages);
1377 }
1378 for spec in self.local.iter() {
1379 total += data_to_meta_size_round_up(spec, data_pages);
1380 }
1381 total
1382 }
1383
1384 pub fn try_map_metadata_space(
1390 &self,
1391 start: Address,
1392 size: usize,
1393 space_name: &str,
1394 ) -> Result<()> {
1395 debug!(
1396 "try_map_metadata_space({}, 0x{:x}, {}, {})",
1397 start,
1398 size,
1399 self.global.len(),
1400 self.local.len()
1401 );
1402 debug_assert!(start.is_aligned_to(BYTES_IN_PAGE));
1404 debug_assert!(size % BYTES_IN_PAGE == 0);
1405 self.map_metadata_internal(start, size, false, space_name)
1406 }
1407
1408 pub fn try_map_metadata_address_range(
1413 &self,
1414 start: Address,
1415 size: usize,
1416 name: &str,
1417 ) -> Result<()> {
1418 debug!(
1419 "try_map_metadata_address_range({}, 0x{:x}, {}, {})",
1420 start,
1421 size,
1422 self.global.len(),
1423 self.local.len()
1424 );
1425 debug_assert!(start.is_aligned_to(BYTES_IN_CHUNK));
1427 debug_assert!(size % BYTES_IN_CHUNK == 0);
1428 self.map_metadata_internal(start, size, true, name)
1429 }
1430
1431 fn map_metadata_internal(
1439 &self,
1440 start: Address,
1441 size: usize,
1442 no_reserve: bool,
1443 space_name: &str,
1444 ) -> Result<()> {
1445 for spec in self.global.iter() {
1446 let anno = MmapAnnotation::SideMeta {
1447 space: space_name,
1448 meta: spec.name,
1449 };
1450 match try_mmap_contiguous_metadata_space(start, size, spec, no_reserve, &anno) {
1451 Ok(_) => {}
1452 Err(e) => return Result::Err(e),
1453 }
1454 }
1455
1456 #[cfg(target_pointer_width = "32")]
1457 let mut lsize: usize = 0;
1458
1459 for spec in self.local.iter() {
1460 #[cfg(target_pointer_width = "64")]
1472 {
1473 let anno = MmapAnnotation::SideMeta {
1474 space: space_name,
1475 meta: spec.name,
1476 };
1477 match try_mmap_contiguous_metadata_space(start, size, spec, no_reserve, &anno) {
1478 Ok(_) => {}
1479 Err(e) => return Result::Err(e),
1480 }
1481 }
1482 #[cfg(target_pointer_width = "32")]
1483 {
1484 lsize += metadata_bytes_per_chunk(spec.log_bytes_in_region, spec.log_num_of_bits);
1485 }
1486 }
1487
1488 #[cfg(target_pointer_width = "32")]
1489 if lsize > 0 {
1490 let max = BYTES_IN_CHUNK >> super::constants::LOG_LOCAL_SIDE_METADATA_WORST_CASE_RATIO;
1491 debug_assert!(
1492 lsize <= max,
1493 "local side metadata per chunk (0x{:x}) must be less than (0x{:x})",
1494 lsize,
1495 max
1496 );
1497 let anno = MmapAnnotation::SideMeta {
1500 space: space_name,
1501 meta: "all",
1502 };
1503 match try_map_per_chunk_metadata_space(start, size, lsize, no_reserve, &anno) {
1504 Ok(_) => {}
1505 Err(e) => return Result::Err(e),
1506 }
1507 }
1508
1509 Ok(())
1510 }
1511
1512 #[cfg(test)]
1520 pub fn ensure_unmap_metadata_space(&self, start: Address, size: usize) {
1521 trace!("ensure_unmap_metadata_space({}, 0x{:x})", start, size);
1522 debug_assert!(start.is_aligned_to(BYTES_IN_PAGE));
1523 debug_assert!(size % BYTES_IN_PAGE == 0);
1524
1525 for spec in self.global.iter() {
1526 ensure_munmap_contiguous_metadata_space(start, size, spec);
1527 }
1528
1529 for spec in self.local.iter() {
1530 #[cfg(target_pointer_width = "64")]
1531 {
1532 ensure_munmap_contiguous_metadata_space(start, size, spec);
1533 }
1534 #[cfg(target_pointer_width = "32")]
1535 {
1536 ensure_munmap_chunked_metadata_space(start, size, spec);
1537 }
1538 }
1539 }
1540}
1541
/// A borrowed view of a byte-granularity side-metadata table covering a heap
/// range of `ENTRIES` regions: one metadata byte per region.
pub struct MetadataByteArrayRef<const ENTRIES: usize> {
    /// Start of the heap range this array covers (kept only for sanity checking).
    #[cfg(feature = "extreme_assertions")]
    heap_range_start: Address,
    /// The spec this array is a view of (kept only for sanity checking).
    #[cfg(feature = "extreme_assertions")]
    spec: SideMetadataSpec,
    /// The metadata bytes themselves.
    data: &'static [u8; ENTRIES],
}
1550
impl<const ENTRIES: usize> MetadataByteArrayRef<ENTRIES> {
    /// Create a view of the side metadata for the heap range
    /// `[start, start + bytes)`.
    ///
    /// The spec must use exactly one byte of metadata per region
    /// (`log_num_of_bits == LOG_BITS_IN_BYTE`), and `bytes` must correspond to
    /// exactly `ENTRIES` metadata bytes.
    pub fn new(metadata_spec: &SideMetadataSpec, start: Address, bytes: usize) -> Self {
        debug_assert_eq!(
            metadata_spec.log_num_of_bits, LOG_BITS_IN_BYTE as usize,
            "Each heap entry should map to a byte in side-metadata"
        );
        debug_assert_eq!(
            bytes >> metadata_spec.log_bytes_in_region,
            ENTRIES,
            "Heap range size and MetadataByteArray size does not match"
        );
        Self {
            #[cfg(feature = "extreme_assertions")]
            heap_range_start: start,
            #[cfg(feature = "extreme_assertions")]
            spec: *metadata_spec,
            // NOTE(review): the deref assumes the metadata for this range is
            // already mapped -- confirm callers guarantee this before `new`.
            data: unsafe { &*address_to_meta_address(metadata_spec, start).to_ptr() },
        }
    }

    /// The number of entries (metadata bytes) in this array.
    #[allow(clippy::len_without_is_empty)]
    pub const fn len(&self) -> usize {
        ENTRIES
    }

    /// Get the metadata byte for the `index`-th region of the heap range.
    #[allow(clippy::let_and_return)]
    pub fn get(&self, index: usize) -> u8 {
        // Under extreme_assertions, hold the sanity lock and verify the load
        // against the shadow table.
        #[cfg(feature = "extreme_assertions")]
        let _lock = sanity::SANITY_LOCK.lock().unwrap();
        let value = self.data[index];
        #[cfg(feature = "extreme_assertions")]
        {
            let data_addr = self.heap_range_start + (index << self.spec.log_bytes_in_region);
            sanity::verify_load::<u8>(&self.spec, data_addr, value);
        }
        value
    }
}
1601
1602#[cfg(test)]
1603mod tests {
1604 use super::*;
1605 use crate::mmap_anno_test;
1606 use crate::util::metadata::side_metadata::SideMetadataContext;
1607
    /// A zero (relative) offset, used as a placeholder by tests that never
    /// dereference metadata through the offset.
    pub const ZERO_OFFSET: SideMetadataOffset = SideMetadataOffset { rel_offset: 0 };
1610
1611 #[test]
1612 fn calculate_reserved_pages_one_spec() {
1613 let spec = SideMetadataSpec {
1615 name: "test_spec",
1616 is_global: true,
1617 offset: ZERO_OFFSET,
1618 log_num_of_bits: 0,
1619 log_bytes_in_region: 3,
1620 };
1621 let side_metadata = SideMetadataContext {
1622 global: vec![spec],
1623 local: vec![],
1624 };
1625 assert_eq!(side_metadata.calculate_reserved_pages(0), 0);
1626 assert_eq!(side_metadata.calculate_reserved_pages(63), 1);
1627 assert_eq!(side_metadata.calculate_reserved_pages(64), 1);
1628 assert_eq!(side_metadata.calculate_reserved_pages(65), 2);
1629 assert_eq!(side_metadata.calculate_reserved_pages(1024), 16);
1630 }
1631
    #[test]
    fn calculate_reserved_pages_multi_specs() {
        // Global spec: 1 bit per 8-byte region => 1/64 of the data size.
        let gspec = SideMetadataSpec {
            name: "gspec",
            is_global: true,
            offset: ZERO_OFFSET,
            log_num_of_bits: 0,
            log_bytes_in_region: 3,
        };
        // Local spec: 2 bits per 4096-byte region => 1/16384 of the data size.
        let lspec = SideMetadataSpec {
            name: "lspec",
            is_global: false,
            offset: ZERO_OFFSET,
            log_num_of_bits: 1,
            log_bytes_in_region: 12,
        };
        let side_metadata = SideMetadataContext {
            global: vec![gspec],
            local: vec![lspec],
        };
        // 1024 data pages: gspec needs 1024/64 = 16 pages; lspec needs
        // 1024/16384 of a page, rounded up to 1.
        assert_eq!(side_metadata.calculate_reserved_pages(1024), 16 + 1);
    }
1656
1657 use crate::util::heap::layout::vm_layout;
1658 use crate::util::test_util::{serial_test, with_cleanup};
1659 use paste::paste;
1660
    /// log2 of the data-bytes-per-region used by the specs in the access tests
    /// below (4096-byte regions).
    const TEST_LOG_BYTES_IN_REGION: usize = 12;
1662
    /// Test harness shared by the access tests below.
    ///
    /// Creates a global spec with `1 << log_bits` bits per `TEST_LOG_BYTES_IN_REGION`
    /// region, maps its metadata for one page of heap, then runs `f(spec, data_addr,
    /// meta_addr)`. The whole body runs under `serial_test` because the tests share
    /// the same metadata address range; cleanup zeroes the metadata word and resets
    /// the sanity table even if `f` panics.
    fn test_side_metadata(
        log_bits: usize,
        f: impl Fn(&SideMetadataSpec, Address, Address) + std::panic::RefUnwindSafe,
    ) {
        serial_test(|| {
            let spec = SideMetadataSpec {
                name: "Test Spec $tname",
                is_global: true,
                offset: SideMetadataOffset::addr(GLOBAL_SIDE_METADATA_BASE_ADDRESS),
                log_num_of_bits: log_bits,
                log_bytes_in_region: TEST_LOG_BYTES_IN_REGION, };
            let context = SideMetadataContext {
                global: vec![spec],
                local: vec![],
            };
            let mut sanity = SideMetadataSanity::new();
            sanity.verify_metadata_context("TestPolicy", &context);

            // Map one page of "heap" at the start of the heap layout for the test data.
            let data_addr = vm_layout::vm_layout().heap_start;
            crate::MMAPPER
                .ensure_mapped(
                    data_addr,
                    1,
                    HugePageSupport::No,
                    MmapProtection::ReadWrite,
                    mmap_anno_test!(),
                )
                .unwrap();
            let meta_addr = address_to_meta_address(&spec, data_addr);
            with_cleanup(
                || {
                    // Map the metadata for the data page, then run the test body.
                    let mmap_result =
                        context.try_map_metadata_space(data_addr, BYTES_IN_PAGE, "test_space");
                    assert!(mmap_result.is_ok(), "{:?}", mmap_result);

                    f(&spec, data_addr, meta_addr);
                },
                || {
                    // Cleanup: a u64 store below covers at most 64 metadata bits.
                    assert!(log_bits <= 6);
                    let meta_ptr: *mut u64 = meta_addr.to_mut_ptr();
                    unsafe { *meta_ptr = 0 };

                    sanity::reset();
                },
            )
        })
    }
1713
1714 fn max_value(log_bits: usize) -> u64 {
1715 (0..(1 << log_bits)).fold(0, |accum, x| accum + (1 << x))
1716 }
    /// Sanity-check `max_value`: it should produce the all-ones pattern for a
    /// field of `1 << log_bits` bits.
    #[test]
    fn test_max_value() {
        assert_eq!(max_value(0), 1);
        assert_eq!(max_value(1), 0b11);
        assert_eq!(max_value(2), 0b1111);
        assert_eq!(max_value(3), 255);
        assert_eq!(max_value(4), 65535);
    }
1725
    /// Generate a battery of `#[test]`s (named `{$tname}_load`, `{$tname}_store`,
    /// `{$tname}_fetch_add_1`, ...) that exercise the load/store/CAS/fetch-op and
    /// search APIs of `SideMetadataSpec` for a metadata field of `1 << $log_bits`
    /// bits, accessed as `$type`. Each test runs inside the `test_side_metadata`
    /// harness above; `meta_ptr` lets tests inspect the raw metadata byte(s) to
    /// check that bits outside the field are never disturbed.
    macro_rules! test_side_metadata_access {
        ($tname: ident, $type: ty, $log_bits: expr) => {
            paste!{
                #[test]
                fn [<$tname _load>]() {
                    test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
                        let meta_ptr: *mut $type = meta_addr.to_mut_ptr();

                        // Freshly mapped metadata reads as zero via both load paths.
                        assert_eq!(unsafe { spec.load::<$type>(data_addr) }, 0);
                        assert_eq!(spec.load_atomic::<$type>(data_addr, Ordering::SeqCst), 0);

                        // A store round-trips through both load paths and the raw bytes.
                        let max_value: $type = max_value($log_bits) as _;
                        unsafe { spec.store::<$type>(data_addr, max_value); }
                        assert_eq!(unsafe { spec.load::<$type>(data_addr) }, max_value);
                        assert_eq!(spec.load_atomic::<$type>(data_addr, Ordering::SeqCst), max_value);
                        assert_eq!(unsafe { *meta_ptr }, max_value);
                    });
                }

                #[test]
                fn [<$tname _store>]() {
                    test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
                        let meta_ptr: *mut $type = meta_addr.to_mut_ptr();
                        let max_value: $type = max_value($log_bits) as _;

                        // Storing 0 must clear only this field's bits; neighbors stay set.
                        unsafe { *meta_ptr = <$type>::MAX; }
                        unsafe { spec.store::<$type>(data_addr, 0); }
                        assert_eq!(unsafe { spec.load::<$type>(data_addr) }, 0);
                        assert_eq!(unsafe { *meta_ptr }, <$type>::MAX & (!max_value));
                    });
                }

                #[test]
                fn [<$tname _atomic_store>]() {
                    test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
                        let meta_ptr: *mut $type = meta_addr.to_mut_ptr();
                        let max_value: $type = max_value($log_bits) as _;

                        // Same as _store, but via the atomic store path.
                        unsafe { *meta_ptr = <$type>::MAX; }
                        spec.store_atomic::<$type>(data_addr, 0, Ordering::SeqCst);
                        assert_eq!(unsafe { spec.load::<$type>(data_addr) }, 0);
                        assert_eq!(unsafe { *meta_ptr }, <$type>::MAX & (!max_value));
                    });
                }

                #[test]
                fn [<$tname _compare_exchange_success>]() {
                    test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
                        let meta_ptr: *mut $type = meta_addr.to_mut_ptr();
                        let max_value: $type = max_value($log_bits) as _;
                        unsafe { *meta_ptr = <$type>::MAX; }
                        spec.store_atomic::<$type>(data_addr, 1, Ordering::SeqCst);

                        let old_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
                        assert_eq!(old_val, 1);

                        // CAS from the current value must succeed and leave neighboring bits intact.
                        let new_val = 0;
                        let res = spec.compare_exchange_atomic::<$type>(data_addr, old_val, new_val, Ordering::SeqCst, Ordering::SeqCst);
                        assert!(res.is_ok());
                        assert_eq!(res.unwrap(), old_val, "old vals do not match");

                        let after_update = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
                        assert_eq!(after_update, new_val);
                        assert_eq!(unsafe { *meta_ptr }, <$type>::MAX & (!max_value));
                    });
                }

                #[test]
                fn [<$tname _compare_exchange_fail>]() {
                    test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
                        let meta_ptr: *mut $type = meta_addr.to_mut_ptr();
                        unsafe { *meta_ptr = <$type>::MAX; }
                        spec.store_atomic::<$type>(data_addr, 1, Ordering::SeqCst);

                        let old_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
                        assert_eq!(old_val, 1);

                        // Invalidate the expected value, then the CAS must fail
                        // without touching the raw bits.
                        spec.store_atomic::<$type>(data_addr, 0, Ordering::SeqCst);
                        let bits_before_cas = unsafe { *meta_ptr };

                        let new_val = 0;
                        let res = spec.compare_exchange_atomic::<$type>(data_addr, old_val, new_val, Ordering::SeqCst, Ordering::SeqCst);
                        assert!(res.is_err());
                        assert_eq!(res.err().unwrap(), 0);
                        let bits_after_cas = unsafe { *meta_ptr };
                        assert_eq!(bits_before_cas, bits_after_cas);
                    });
                }

                #[test]
                fn [<$tname _fetch_add_1>]() {
                    test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
                        let meta_ptr: *mut $type = meta_addr.to_mut_ptr();
                        unsafe { *meta_ptr = <$type>::MAX; }
                        spec.store_atomic::<$type>(data_addr, 0, Ordering::SeqCst);

                        let old_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);

                        // fetch_add returns the pre-add value.
                        let old_val_from_fetch = spec.fetch_add_atomic::<$type>(data_addr, 1, Ordering::SeqCst);
                        assert_eq!(old_val_from_fetch, old_val);

                        let new_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
                        assert_eq!(new_val, 1);
                    });
                }

                #[test]
                fn [<$tname _fetch_add_max>]() {
                    test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
                        let meta_ptr: *mut $type = meta_addr.to_mut_ptr();
                        let max_value: $type = max_value($log_bits) as _;
                        unsafe { *meta_ptr = <$type>::MAX; }
                        spec.store_atomic::<$type>(data_addr, 0, Ordering::SeqCst);

                        let old_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);

                        // Adding the field maximum to zero saturates the field exactly.
                        let old_val_from_fetch = spec.fetch_add_atomic::<$type>(data_addr, max_value, Ordering::SeqCst);
                        assert_eq!(old_val_from_fetch, old_val);

                        let new_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
                        assert_eq!(new_val, max_value);
                    });
                }

                #[test]
                fn [<$tname _fetch_add_overflow>]() {
                    test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
                        let meta_ptr: *mut $type = meta_addr.to_mut_ptr();
                        let max_value: $type = max_value($log_bits) as _;
                        unsafe { *meta_ptr = <$type>::MAX; }
                        spec.store_atomic::<$type>(data_addr, max_value, Ordering::SeqCst);

                        let old_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);

                        // Adding past the field's max wraps within the field (mod 2^bits).
                        let old_val_from_fetch = spec.fetch_add_atomic::<$type>(data_addr, 1, Ordering::SeqCst);
                        assert_eq!(old_val_from_fetch, old_val);

                        let new_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
                        assert_eq!(new_val, 0);
                    });
                }

                #[test]
                fn [<$tname _fetch_sub_1>]() {
                    test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
                        let meta_ptr: *mut $type = meta_addr.to_mut_ptr();
                        unsafe { *meta_ptr = <$type>::MAX; }
                        spec.store_atomic::<$type>(data_addr, 1, Ordering::SeqCst);

                        let old_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);

                        // fetch_sub returns the pre-subtract value.
                        let old_val_from_fetch = spec.fetch_sub_atomic::<$type>(data_addr, 1, Ordering::SeqCst);
                        assert_eq!(old_val_from_fetch, old_val);

                        let new_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
                        assert_eq!(new_val, 0);
                    });
                }

                #[test]
                fn [<$tname _fetch_sub_max>]() {
                    test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
                        let meta_ptr: *mut $type = meta_addr.to_mut_ptr();
                        let max_value: $type = max_value($log_bits) as _;
                        unsafe { *meta_ptr = <$type>::MAX; }
                        spec.store_atomic::<$type>(data_addr, max_value, Ordering::SeqCst);

                        let old_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);

                        // Subtracting the field maximum from the maximum yields zero.
                        let old_val_from_fetch = spec.fetch_sub_atomic::<$type>(data_addr, max_value, Ordering::SeqCst);
                        assert_eq!(old_val_from_fetch, old_val);

                        let new_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
                        assert_eq!(new_val, 0);
                    });
                }

                #[test]
                fn [<$tname _fetch_sub_overflow>]() {
                    test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
                        let meta_ptr: *mut $type = meta_addr.to_mut_ptr();
                        let max_value: $type = max_value($log_bits) as _;
                        unsafe { *meta_ptr = <$type>::MAX; }
                        spec.store_atomic::<$type>(data_addr, 0, Ordering::SeqCst);

                        let old_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);

                        // Subtracting below zero wraps within the field to its maximum.
                        let old_val_from_fetch = spec.fetch_sub_atomic::<$type>(data_addr, 1, Ordering::SeqCst);
                        assert_eq!(old_val_from_fetch, old_val);

                        let new_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
                        assert_eq!(new_val, max_value);
                    });
                }

                #[test]
                fn [<$tname _fetch_and>]() {
                    test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
                        let meta_ptr: *mut $type = meta_addr.to_mut_ptr();
                        let max_value: $type = max_value($log_bits) as _;
                        unsafe { *meta_ptr = <$type>::MAX; }
                        spec.store_atomic::<$type>(data_addr, max_value, Ordering::SeqCst);

                        // AND with all-ones is a no-op: field and raw bits unchanged.
                        let old_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
                        let old_val_from_fetch = spec.fetch_and_atomic::<$type>(data_addr, max_value, Ordering::SeqCst);
                        assert_eq!(old_val_from_fetch, old_val, "old values do not match");
                        assert_eq!(spec.load_atomic::<$type>(data_addr, Ordering::SeqCst), max_value, "load values do not match");
                        assert_eq!(unsafe { *meta_ptr }, <$type>::MAX, "raw values do not match");

                        // AND that clears the lowest bit clears only that bit.
                        let last_bit_zero = max_value - 1;
                        let old_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
                        let old_val_from_fetch = spec.fetch_and_atomic::<$type>(data_addr, last_bit_zero, Ordering::SeqCst);
                        assert_eq!(old_val_from_fetch, old_val);
                        assert_eq!(spec.load_atomic::<$type>(data_addr, Ordering::SeqCst), last_bit_zero);
                        assert_eq!(unsafe { *meta_ptr }, <$type>::MAX - 1);
                    });
                }

                #[test]
                fn [<$tname _fetch_or>]() {
                    test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
                        let meta_ptr: *mut $type = meta_addr.to_mut_ptr();
                        let max_value: $type = max_value($log_bits) as _;
                        unsafe { *meta_ptr = 0; }
                        spec.store_atomic::<$type>(data_addr, 0, Ordering::SeqCst);

                        // OR with zero is a no-op.
                        let old_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
                        let old_val_from_fetch = spec.fetch_or_atomic::<$type>(data_addr, 0, Ordering::SeqCst);
                        assert_eq!(old_val_from_fetch, old_val);
                        assert_eq!(spec.load_atomic::<$type>(data_addr, Ordering::SeqCst), 0);
                        assert_eq!(unsafe { *meta_ptr }, 0);

                        // OR with all field bits sets exactly the field bits.
                        let old_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
                        let old_val_from_fetch = spec.fetch_or_atomic::<$type>(data_addr, max_value, Ordering::SeqCst);
                        assert_eq!(old_val_from_fetch, old_val);
                        assert_eq!(spec.load_atomic::<$type>(data_addr, Ordering::SeqCst), max_value);
                        assert_eq!(unsafe { *meta_ptr }, max_value);
                    });
                }

                #[test]
                fn [<$tname _fetch_update_success>]() {
                    test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
                        let meta_ptr: *mut $type = meta_addr.to_mut_ptr();
                        let max_value: $type = max_value($log_bits) as _;
                        unsafe { *meta_ptr = <$type>::MAX; }
                        spec.store_atomic::<$type>(data_addr, max_value, Ordering::SeqCst);

                        // A closure returning Some applies the update; Ok holds the old value.
                        let old_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
                        let fetch_res = spec.fetch_update_atomic::<$type, _>(data_addr, Ordering::SeqCst, Ordering::SeqCst, |_x: $type| Some(0));
                        assert!(fetch_res.is_ok());
                        assert_eq!(fetch_res.unwrap(), old_val);
                        assert_eq!(spec.load_atomic::<$type>(data_addr, Ordering::SeqCst), 0);
                        assert_eq!(unsafe { *meta_ptr }, <$type>::MAX & (!max_value));
                    });
                }

                #[test]
                fn [<$tname _fetch_update_fail>]() {
                    test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
                        let meta_ptr: *mut $type = meta_addr.to_mut_ptr();
                        let max_value: $type = max_value($log_bits) as _;
                        unsafe { *meta_ptr = <$type>::MAX; }
                        spec.store_atomic::<$type>(data_addr, max_value, Ordering::SeqCst);

                        // A closure returning None aborts the update; Err holds the old value.
                        let old_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
                        let fetch_res = spec.fetch_update_atomic::<$type, _>(data_addr, Ordering::SeqCst, Ordering::SeqCst, |_x: $type| None);
                        assert!(fetch_res.is_err());
                        assert_eq!(fetch_res.err().unwrap(), old_val);
                        assert_eq!(spec.load_atomic::<$type>(data_addr, Ordering::SeqCst), max_value);
                        assert_eq!(unsafe { *meta_ptr }, <$type>::MAX);
                    });
                }

                #[test]
                fn [<$tname _find_prev_non_zero_value_easy>]() {
                    test_side_metadata($log_bits, |spec, data_addr, _meta_addr| {
                        let max_value: $type = max_value($log_bits) as _;
                        spec.store_atomic::<$type>(data_addr, max_value, Ordering::SeqCst);

                        // Searching backwards starting at a non-zero entry finds it immediately.
                        let res_addr = unsafe { spec.find_prev_non_zero_value::<$type>(data_addr, 8) };
                        assert!(res_addr.is_some());
                        assert_eq!(res_addr.unwrap(), data_addr);
                    });
                }

                #[test]
                fn [<$tname _find_prev_non_zero_value_arbitrary_bytes>]() {
                    test_side_metadata($log_bits, |spec, data_addr, _meta_addr| {
                        let max_value: $type = max_value($log_bits) as _;
                        spec.store_atomic::<$type>(data_addr, max_value, Ordering::SeqCst);

                        // From any start offset within a few regions, a backwards
                        // search spanning the marked entry should find it.
                        let test_region = (1 << TEST_LOG_BYTES_IN_REGION);
                        for len in 1..(test_region*4) {
                            let start_addr = data_addr + len;
                            let res_addr = unsafe { spec.find_prev_non_zero_value::<$type>(start_addr, len + 1) };
                            assert!(res_addr.is_some());
                            assert_eq!(res_addr.unwrap(), data_addr);
                        }
                    });
                }

                #[test]
                fn [<$tname _find_prev_non_zero_value_arbitrary_start>]() {
                    test_side_metadata($log_bits, |spec, data_addr, _meta_addr| {
                        let max_value: $type = max_value($log_bits) as _;

                        // Addresses within the same region share one metadata entry,
                        // so the result is always the region's base address.
                        for offset in 0..7usize {
                            let test_data_addr = data_addr + offset;
                            spec.store_atomic::<$type>(test_data_addr, max_value, Ordering::SeqCst);

                            let res_addr = unsafe { spec.find_prev_non_zero_value::<$type>(test_data_addr, 4096) };
                            assert!(res_addr.is_some());
                            assert_eq!(res_addr.unwrap(), data_addr);

                            spec.store_atomic::<$type>(test_data_addr, 0, Ordering::SeqCst);
                        }
                    });
                }

                #[test]
                fn [<$tname _find_prev_non_zero_value_no_find>]() {
                    test_side_metadata($log_bits, |spec, data_addr, _meta_addr| {
                        spec.store_atomic::<$type>(data_addr, 0, Ordering::SeqCst);

                        // With all metadata zero, the search must find nothing.
                        let test_region = (1 << TEST_LOG_BYTES_IN_REGION);
                        for len in 1..(test_region*4) {
                            let start_addr = data_addr + len;
                            let res_addr = unsafe { spec.find_prev_non_zero_value::<$type>(start_addr, len + 1) };
                            assert!(res_addr.is_none());
                        }
                    });
                }
            }
        }
    }
2119
    // Instantiate the access-test battery for every supported metadata field
    // width: 1, 2, 4, 8, 16, 32 and 64 bits, plus the word-sized `usize`.
    test_side_metadata_access!(test_u1, u8, 0);
    test_side_metadata_access!(test_u2, u8, 1);
    test_side_metadata_access!(test_u4, u8, 2);
    test_side_metadata_access!(test_u8, u8, 3);
    test_side_metadata_access!(test_u16, u16, 4);
    test_side_metadata_access!(test_u32, u32, 5);
    test_side_metadata_access!(test_u64, u64, 6);
    test_side_metadata_access!(
        test_usize,
        usize,
        if cfg!(target_pointer_width = "64") {
            6
        } else if cfg!(target_pointer_width = "32") {
            5
        } else {
            unreachable!()
        }
    );
2138
    /// Exercise the bulk bit-range helpers on a raw 8-byte buffer. Ranges are
    /// expressed as (byte address, bit offset) pairs and are half-open:
    /// `[start_bit, end_bit)`. (The buffer is intentionally leaked — fine for a test.)
    #[test]
    fn test_bulk_update_meta_bits() {
        let raw_mem =
            unsafe { std::alloc::alloc_zeroed(std::alloc::Layout::from_size_align(8, 8).unwrap()) };
        let addr = Address::from_mut_ptr(raw_mem);

        // Set bits 0..4 of the first byte.
        SideMetadataSpec::set_meta_bits(addr, 0, addr, 4);
        assert_eq!(unsafe { addr.load::<u64>() }, 0b1111);

        // Zero bits 1..3 — bits 0 and 3 survive.
        SideMetadataSpec::zero_meta_bits(addr, 1, addr, 3);
        assert_eq!(unsafe { addr.load::<u64>() }, 0b1001);

        // Set bits 2..6, merging with the surviving bits.
        SideMetadataSpec::set_meta_bits(addr, 2, addr, 6);
        assert_eq!(unsafe { addr.load::<u64>() }, 0b0011_1101);

        // Zero the whole first byte (bits 0..8, end expressed as (addr+1, bit 0)).
        SideMetadataSpec::zero_meta_bits(addr, 0, addr + 1usize, 0);
        assert_eq!(unsafe { addr.load::<u64>() }, 0b0);

        // Set a range crossing the byte boundary: bits 2..10.
        SideMetadataSpec::set_meta_bits(addr, 2, addr + 1usize, 2);
        assert_eq!(unsafe { addr.load::<u64>() }, 0b11_1111_1100);

        // Extend down to bit 0: bits 0..10 all set.
        SideMetadataSpec::set_meta_bits(addr, 0, addr + 1usize, 2);
        assert_eq!(unsafe { addr.load::<u64>() }, 0b11_1111_1111);
    }
2163}