1use super::*;
2use crate::util::constants::{BYTES_IN_PAGE, BYTES_IN_WORD, LOG_BITS_IN_BYTE};
3use crate::util::conversions::raw_align_up;
4use crate::util::heap::layout::vm_layout::BYTES_IN_CHUNK;
5use crate::util::metadata::metadata_val_traits::*;
6#[cfg(feature = "vo_bit")]
7use crate::util::metadata::vo_bit::VO_BIT_SIDE_METADATA_SPEC;
8use crate::util::os::*;
9use crate::util::Address;
10use num_traits::FromPrimitive;
11use ranges::BitByteRange;
12use std::fmt;
13use std::sync::atomic::{AtomicU8, Ordering};
14
/// Specification of one side-metadata table.
///
/// Each distinct side metadata (e.g. the VO bit) is described by one constant
/// instance of this struct; all accessors on the impl below are driven by the
/// fields here.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
pub struct SideMetadataSpec {
    /// Human-readable name, used in Debug output and mmap annotations.
    pub name: &'static str,
    /// True if this metadata is global; false for policy-local metadata.
    pub is_global: bool,
    /// Where the metadata lives: an absolute address for contiguous metadata,
    /// or a per-chunk relative offset for chunked (32-bit local) metadata.
    pub offset: SideMetadataOffset,
    /// Number of metadata bits per data region, as log2 (0 => 1 bit, 3 => 1 byte, ...).
    pub log_num_of_bits: usize,
    /// Size of the data region covered by one metadata unit, as log2 of bytes.
    pub log_bytes_in_region: usize,
}
35
36impl SideMetadataSpec {
37 pub const fn uses_contiguous_side_metadata(&self) -> bool {
39 self.is_global || cfg!(target_pointer_width = "64")
40 }
41
    /// Whether `offset` holds an absolute address (true exactly when the
    /// metadata is contiguous).
    pub const fn is_absolute_offset(&self) -> bool {
        self.uses_contiguous_side_metadata()
    }

    /// Whether `offset` holds a per-chunk relative offset.
    pub const fn is_rel_offset(&self) -> bool {
        !self.is_absolute_offset()
    }

    /// Returns the absolute offset. Only meaningful for contiguous metadata.
    pub const fn get_absolute_offset(&self) -> Address {
        debug_assert!(self.is_absolute_offset());
        // The assertion above establishes that `addr` is the active union field.
        unsafe { self.offset.addr }
    }

    /// Returns the per-chunk relative offset. Only meaningful for chunked metadata.
    pub const fn get_rel_offset(&self) -> usize {
        debug_assert!(self.is_rel_offset());
        // The assertion above establishes that `rel_offset` is the active union field.
        unsafe { self.offset.rel_offset }
    }
63
    /// Returns the exclusive upper bound of this spec's offset, i.e. the offset
    /// just past the end of its metadata. On 64-bit targets all metadata is
    /// contiguous, so the result is always an absolute address.
    #[cfg(target_pointer_width = "64")]
    pub const fn upper_bound_offset(&self) -> SideMetadataOffset {
        debug_assert!(self.is_absolute_offset());
        SideMetadataOffset {
            addr: unsafe { self.offset.addr }
                .add(crate::util::metadata::side_metadata::metadata_address_range_size(self)),
        }
    }

    /// Returns the exclusive upper bound of this spec's offset. On 32-bit
    /// targets the offset may be absolute (global metadata) or per-chunk
    /// relative (local metadata); the bound is computed in the same domain.
    #[cfg(target_pointer_width = "32")]
    pub const fn upper_bound_offset(&self) -> SideMetadataOffset {
        if self.is_absolute_offset() {
            SideMetadataOffset {
                addr: unsafe { self.offset.addr }
                    .add(crate::util::metadata::side_metadata::metadata_address_range_size(self)),
            }
        } else {
            SideMetadataOffset {
                rel_offset: unsafe { self.offset.rel_offset }
                    + crate::util::metadata::side_metadata::metadata_bytes_per_chunk(
                        self.log_bytes_in_region,
                        self.log_num_of_bits,
                    ),
            }
        }
    }
92
    /// The address just past the end of this spec's metadata. Only valid for
    /// contiguous side metadata.
    pub const fn upper_bound_address_for_contiguous(&self) -> Address {
        debug_assert!(self.is_absolute_offset());
        unsafe { self.upper_bound_offset().addr }
    }

    /// The address just past the end of this spec's metadata within the chunk
    /// containing `data_addr`. Only valid for chunked (32-bit local) metadata.
    #[cfg(target_pointer_width = "32")]
    pub const fn upper_bound_address_for_chunked(&self, data_addr: Address) -> Address {
        debug_assert!(self.is_rel_offset());
        address_to_meta_chunk_addr(data_addr).add(unsafe { self.upper_bound_offset().rel_offset })
    }
111
    /// Debug helper: panic if the metadata page for `data_addr` is not mapped.
    #[cfg(debug_assertions)]
    pub(crate) fn assert_metadata_mapped(&self, data_addr: Address) {
        // Check at page granularity: if the start of the page is mapped, the
        // metadata byte within it is too.
        let meta_start = address_to_meta_address(self, data_addr).align_down(BYTES_IN_PAGE);

        trace!(
            "ensure_metadata_is_mapped({}).meta_start({})",
            data_addr,
            meta_start
        );

        OS::panic_if_unmapped(meta_start, BYTES_IN_PAGE);
    }

    /// Debug helper: true if `addr1` and `addr2` map to different metadata
    /// bits (different byte, or different shift within the same byte).
    #[cfg(debug_assertions)]
    pub(crate) fn are_different_metadata_bits(&self, addr1: Address, addr2: Address) -> bool {
        let a1 = address_to_meta_address(self, addr1);
        let a2 = address_to_meta_address(self, addr2);
        let s1 = meta_byte_lshift(self, addr1);
        let s2 = meta_byte_lshift(self, addr2);
        (a1, s1) != (a2, s2)
    }
135
    /// Debug helper: assert that the type parameter `T` matches this spec's
    /// bit width, and (for sub-byte metadata) that `val` fits in the available
    /// bits.
    #[cfg(debug_assertions)]
    fn assert_value_type<T: MetadataValue>(&self, val: Option<T>) {
        let log_b = self.log_num_of_bits;
        match log_b {
            // Sub-byte metadata is always accessed via u8 (T::LOG2 == 3).
            _ if log_b < 3 => {
                assert_eq!(T::LOG2, 3);
                if let Some(v) = val {
                    // The value must fit in 2^log_b bits.
                    assert!(
                        v.to_u8().unwrap() < (1 << (1 << log_b)),
                        "Input value {:?} is invalid for the spec {:?}",
                        v,
                        self
                    );
                }
            }
            // 8/16/32/64-bit metadata must use the exact-width type.
            3..=6 => assert_eq!(T::LOG2, log_b as u32),
            _ => unreachable!("side metadata > {}-bits is not supported", 1 << log_b),
        }
    }
158
    /// True if the metadata address corresponding to `data_addr` is mapped.
    pub(crate) fn is_mapped(&self, data_addr: Address) -> bool {
        use crate::MMAPPER;
        let meta_addr = address_to_meta_address(self, data_addr);
        MMAPPER.is_mapped_address(meta_addr)
    }
165
    /// Zero the metadata bits in `[(meta_start_addr, meta_start_bit), (meta_end_addr, meta_end_bit))`.
    /// Whole-byte sub-ranges are bulk-zeroed; partial bytes are cleared with an
    /// atomic AND so neighboring bits in the same byte are preserved.
    pub(crate) fn zero_meta_bits(
        meta_start_addr: Address,
        meta_start_bit: u8,
        meta_end_addr: Address,
        meta_end_bit: u8,
    ) {
        let mut visitor = |range| {
            match range {
                BitByteRange::Bytes { start, end } => {
                    crate::util::memory::zero(start, end - start);
                    false
                }
                BitByteRange::BitsInByte {
                    addr,
                    bit_start,
                    bit_end,
                } => {
                    // mask has 0s in [bit_start, bit_end) and 1s elsewhere, so
                    // fetch_and clears exactly the targeted bits.
                    // checked_shl handles bit_end == 8 (shift would overflow -> 0).
                    let mask: u8 =
                        u8::MAX.checked_shl(bit_end as u32).unwrap_or(0) | !(u8::MAX << bit_start);
                    unsafe { addr.as_ref::<AtomicU8>() }.fetch_and(mask, Ordering::SeqCst);
                    false
                }
            }
        };
        ranges::break_bit_range(
            meta_start_addr,
            meta_start_bit,
            meta_end_addr,
            meta_end_bit,
            true,
            &mut visitor,
        );
    }
202
    /// Set (to all ones) the metadata bits in
    /// `[(meta_start_addr, meta_start_bit), (meta_end_addr, meta_end_bit))`.
    /// Whole-byte sub-ranges are bulk-set to 0xff; partial bytes use an atomic
    /// OR so neighboring bits in the same byte are preserved.
    pub(crate) fn set_meta_bits(
        meta_start_addr: Address,
        meta_start_bit: u8,
        meta_end_addr: Address,
        meta_end_bit: u8,
    ) {
        let mut visitor = |range| {
            match range {
                BitByteRange::Bytes { start, end } => {
                    crate::util::memory::set(start, 0xff, end - start);
                    false
                }
                BitByteRange::BitsInByte {
                    addr,
                    bit_start,
                    bit_end,
                } => {
                    // mask has 1s exactly in [bit_start, bit_end);
                    // checked_shl handles bit_end == 8 (shift would overflow -> 0).
                    let mask: u8 = !(u8::MAX.checked_shl(bit_end as u32).unwrap_or(0))
                        & (u8::MAX << bit_start);
                    unsafe { addr.as_ref::<AtomicU8>() }.fetch_or(mask, Ordering::SeqCst);
                    false
                }
            }
        };
        ranges::break_bit_range(
            meta_start_addr,
            meta_start_bit,
            meta_end_addr,
            meta_end_bit,
            true,
            &mut visitor,
        );
    }
239
    /// Apply `update_meta_bits` to all metadata covering the data range
    /// `[start, start + size)`. For contiguous metadata this is a single call;
    /// for chunked (32-bit local) metadata the range is split per data chunk,
    /// since each chunk has its own metadata area.
    pub(super) fn bulk_update_metadata(
        &self,
        start: Address,
        size: usize,
        update_meta_bits: &impl Fn(Address, u8, Address, u8),
    ) {
        // Contiguous case: start/end both translate directly to metadata addresses.
        let update_contiguous = |data_start: Address, data_bytes: usize| {
            if data_bytes == 0 {
                return;
            }
            let meta_start = address_to_meta_address(self, data_start);
            let meta_start_shift = meta_byte_lshift(self, data_start);
            let meta_end = address_to_meta_address(self, data_start + data_bytes);
            let meta_end_shift = meta_byte_lshift(self, data_start + data_bytes);
            update_meta_bits(meta_start, meta_start_shift, meta_end, meta_end_shift);
        };

        // Chunked case: the data range must lie within one chunk; the metadata
        // end position is derived from the start plus the number of metadata
        // bits the range spans.
        #[cfg(target_pointer_width = "32")]
        let update_discontiguous = |data_start: Address, data_bytes: usize| {
            use crate::util::constants::BITS_IN_BYTE;
            if data_bytes == 0 {
                return;
            }
            debug_assert_eq!(
                data_start.align_down(BYTES_IN_CHUNK),
                (data_start + data_bytes - 1).align_down(BYTES_IN_CHUNK),
                "The data to be zeroed in discontiguous specs needs to be in the same chunk"
            );
            let meta_start = address_to_meta_address(self, data_start);
            let meta_start_shift = meta_byte_lshift(self, data_start);
            // Total metadata bits covering the range; split into whole bytes + leftover bits.
            let meta_total_bits = (data_bytes >> self.log_bytes_in_region) << self.log_num_of_bits;
            let meta_delta_bytes = meta_total_bits >> LOG_BITS_IN_BYTE;
            let meta_delta_bits: u8 = (meta_total_bits % BITS_IN_BYTE) as u8;
            let (meta_end, meta_end_shift) = {
                let mut end_addr = meta_start + meta_delta_bytes;
                let mut end_bit = meta_start_shift + meta_delta_bits;
                // Carry into the next byte if the bit offset overflows.
                if end_bit >= BITS_IN_BYTE as u8 {
                    end_bit -= BITS_IN_BYTE as u8;
                    end_addr += 1usize;
                }
                (end_addr, end_bit)
            };

            update_meta_bits(meta_start, meta_start_shift, meta_end, meta_end_shift);
        };

        if cfg!(target_pointer_width = "64") || self.is_global {
            update_contiguous(start, size);
        }
        #[cfg(target_pointer_width = "32")]
        if !self.is_global {
            // Number of chunk boundaries crossed by [start, start + size).
            let chunk_num = ((start + size).align_down(BYTES_IN_CHUNK)
                - start.align_down(BYTES_IN_CHUNK))
                / BYTES_IN_CHUNK;
            if chunk_num == 0 {
                update_discontiguous(start, size);
            } else {
                // Partial first chunk, partial last chunk, then all whole
                // chunks in between.
                let second_data_chunk = start.align_up(BYTES_IN_CHUNK);
                update_discontiguous(start, second_data_chunk - start);
                let last_data_chunk = (start + size).align_down(BYTES_IN_CHUNK);
                update_discontiguous(last_data_chunk, start + size - last_data_chunk);
                let mut next_data_chunk = second_data_chunk;

                while next_data_chunk != last_data_chunk {
                    update_discontiguous(next_data_chunk, BYTES_IN_CHUNK);
                    next_data_chunk += BYTES_IN_CHUNK;
                }
            }
        }
    }
328
    /// Bulk-zero the metadata for the data range `[start, start + size)`.
    pub fn bzero_metadata(&self, start: Address, size: usize) {
        #[cfg(feature = "extreme_assertions")]
        let _lock = sanity::SANITY_LOCK.lock().unwrap();

        #[cfg(feature = "extreme_assertions")]
        sanity::verify_bzero(self, start, size);

        self.bulk_update_metadata(start, size, &Self::zero_meta_bits)
    }

    /// Bulk-set (to all ones) the metadata for the data range `[start, start + size)`.
    pub fn bset_metadata(&self, start: Address, size: usize) {
        #[cfg(feature = "extreme_assertions")]
        let _lock = sanity::SANITY_LOCK.lock().unwrap();

        #[cfg(feature = "extreme_assertions")]
        sanity::verify_bset(self, start, size);

        self.bulk_update_metadata(start, size, &Self::set_meta_bits)
    }
366
    /// Bulk-copy the metadata of `other` into `self` for the data range
    /// `[start, start + size)`. Both specs must be contiguous and have the same
    /// bit width and region size, so source and destination metadata share the
    /// same layout (same byte offsets and bit shifts).
    pub fn bcopy_metadata_contiguous(&self, start: Address, size: usize, other: &SideMetadataSpec) {
        #[cfg(feature = "extreme_assertions")]
        let _lock = sanity::SANITY_LOCK.lock().unwrap();

        #[cfg(feature = "extreme_assertions")]
        sanity::verify_bcopy(self, start, size, other);

        debug_assert_eq!(other.log_bytes_in_region, self.log_bytes_in_region);
        debug_assert_eq!(other.log_num_of_bits, self.log_num_of_bits);

        let dst_meta_start_addr = address_to_meta_address(self, start);
        let dst_meta_start_bit = meta_byte_lshift(self, start);
        let dst_meta_end_addr = address_to_meta_address(self, start + size);
        let dst_meta_end_bit = meta_byte_lshift(self, start + size);

        let src_meta_start_addr = address_to_meta_address(other, start);
        let src_meta_start_bit = meta_byte_lshift(other, start);

        debug_assert_eq!(dst_meta_start_bit, src_meta_start_bit);

        let mut visitor = |range| {
            match range {
                BitByteRange::Bytes {
                    start: dst_start,
                    end: dst_end,
                } => unsafe {
                    // Whole bytes: copy with a plain (non-atomic) memmove-style copy.
                    let byte_offset = dst_start - dst_meta_start_addr;
                    let src_start = src_meta_start_addr + byte_offset;
                    let size = dst_end - dst_start;
                    std::ptr::copy::<u8>(src_start.to_ptr(), dst_start.to_mut_ptr(), size);
                    false
                },
                BitByteRange::BitsInByte {
                    addr: dst,
                    bit_start,
                    bit_end,
                } => {
                    // Partial byte: merge the masked source bits into the
                    // destination byte, preserving bits outside [bit_start, bit_end).
                    let byte_offset = dst - dst_meta_start_addr;
                    let src = src_meta_start_addr + byte_offset;
                    let mask: u8 = !(u8::MAX.checked_shl(bit_end as u32).unwrap_or(0))
                        & (u8::MAX << bit_start);
                    let old_src = unsafe { src.as_ref::<AtomicU8>() }.load(Ordering::Relaxed);
                    let old_dst = unsafe { dst.as_ref::<AtomicU8>() }.load(Ordering::Relaxed);
                    let new = (old_src & mask) | (old_dst & !mask);
                    // NOTE(review): load/store pair is not a single atomic RMW —
                    // presumably callers guarantee no concurrent writers here.
                    unsafe { dst.as_ref::<AtomicU8>() }.store(new, Ordering::Relaxed);
                    false
                }
            }
        };

        ranges::break_bit_range(
            dst_meta_start_addr,
            dst_meta_start_bit,
            dst_meta_end_addr,
            dst_meta_end_bit,
            true,
            &mut visitor,
        );
    }
439
    /// Common wrapper for all metadata accesses: takes the sanity lock and runs
    /// value/mapping checks (under the relevant features), executes
    /// `access_func`, then runs `verify_func` on the result.
    /// `CHECK_VALUE` selects whether the input/output are type- and
    /// sanity-checked; raw-byte/raw-word accessors pass `false`.
    #[allow(unused_variables)]
    fn side_metadata_access<
        const CHECK_VALUE: bool,
        T: MetadataValue,
        R: Copy,
        F: FnOnce() -> R,
        V: FnOnce(R),
    >(
        &self,
        data_addr: Address,
        input: Option<T>,
        access_func: F,
        verify_func: V,
    ) -> R {
        // Hold the sanity lock for the whole access so the shadow table stays
        // consistent with the real metadata.
        #[cfg(feature = "extreme_assertions")]
        let _lock = sanity::SANITY_LOCK.lock().unwrap();

        #[cfg(debug_assertions)]
        {
            if CHECK_VALUE {
                self.assert_value_type::<T>(input);
            }
            #[cfg(feature = "extreme_assertions")]
            self.assert_metadata_mapped(data_addr);
        }

        let ret = access_func();

        if CHECK_VALUE {
            verify_func(ret);
        }

        ret
    }
488
    /// Non-atomically load the metadata value for `data_addr`.
    ///
    /// # Safety
    /// Plain (non-atomic) load: the caller must ensure no concurrent writers
    /// to this metadata location.
    pub unsafe fn load<T: MetadataValue>(&self, data_addr: Address) -> T {
        self.side_metadata_access::<true, T, _, _, _>(
            data_addr,
            None,
            || {
                let meta_addr = address_to_meta_address(self, data_addr);
                let bits_num_log = self.log_num_of_bits;
                if bits_num_log < 3 {
                    // Sub-byte metadata: extract this region's bits from the byte.
                    let lshift = meta_byte_lshift(self, data_addr);
                    let mask = meta_byte_mask(self) << lshift;
                    let byte_val = meta_addr.load::<u8>();

                    FromPrimitive::from_u8((byte_val & mask) >> lshift).unwrap()
                } else {
                    meta_addr.load::<T>()
                }
            },
            |_v| {
                #[cfg(feature = "extreme_assertions")]
                sanity::verify_load(self, data_addr, _v);
            },
        )
    }

    /// Non-atomically store `metadata` for `data_addr`.
    ///
    /// # Safety
    /// Plain (non-atomic) read-modify-write for sub-byte specs: the caller
    /// must ensure exclusive access to this metadata byte.
    pub unsafe fn store<T: MetadataValue>(&self, data_addr: Address, metadata: T) {
        self.side_metadata_access::<true, T, _, _, _>(
            data_addr,
            Some(metadata),
            || {
                let meta_addr = address_to_meta_address(self, data_addr);
                let bits_num_log = self.log_num_of_bits;
                if bits_num_log < 3 {
                    // Sub-byte metadata: merge the new bits into the byte,
                    // preserving neighbors.
                    let lshift = meta_byte_lshift(self, data_addr);
                    let mask = meta_byte_mask(self) << lshift;
                    let old_val = meta_addr.load::<u8>();
                    let new_val = (old_val & !mask) | (metadata.to_u8().unwrap() << lshift);

                    meta_addr.store::<u8>(new_val);
                } else {
                    meta_addr.store::<T>(metadata);
                }
            },
            |_| {
                #[cfg(feature = "extreme_assertions")]
                sanity::verify_store(self, data_addr, metadata);
            },
        )
    }
553
    /// Atomically load the metadata value for `data_addr` with the given ordering.
    pub fn load_atomic<T: MetadataValue>(&self, data_addr: Address, order: Ordering) -> T {
        self.side_metadata_access::<true, T, _, _, _>(
            data_addr,
            None,
            || {
                let meta_addr = address_to_meta_address(self, data_addr);
                let bits_num_log = self.log_num_of_bits;
                if bits_num_log < 3 {
                    // Sub-byte metadata: atomically load the byte, then extract.
                    let lshift = meta_byte_lshift(self, data_addr);
                    let mask = meta_byte_mask(self) << lshift;
                    let byte_val = unsafe { meta_addr.atomic_load::<AtomicU8>(order) };
                    FromPrimitive::from_u8((byte_val & mask) >> lshift).unwrap()
                } else {
                    unsafe { T::load_atomic(meta_addr, order) }
                }
            },
            |_v| {
                #[cfg(feature = "extreme_assertions")]
                sanity::verify_load(self, data_addr, _v);
            },
        )
    }

    /// Atomically store `metadata` for `data_addr` with the given ordering.
    pub fn store_atomic<T: MetadataValue>(&self, data_addr: Address, metadata: T, order: Ordering) {
        self.side_metadata_access::<true, T, _, _, _>(
            data_addr,
            Some(metadata),
            || {
                let meta_addr = address_to_meta_address(self, data_addr);
                let bits_num_log = self.log_num_of_bits;
                if bits_num_log < 3 {
                    // Sub-byte metadata: use a CAS loop (fetch_update) so
                    // concurrent updates to neighboring bits are not lost.
                    let lshift = meta_byte_lshift(self, data_addr);
                    let mask = meta_byte_mask(self) << lshift;
                    let metadata_u8 = metadata.to_u8().unwrap();
                    let _ = unsafe {
                        <u8 as MetadataValue>::fetch_update(meta_addr, order, order, |v: u8| {
                            Some((v & !mask) | (metadata_u8 << lshift))
                        })
                    };
                } else {
                    unsafe {
                        T::store_atomic(meta_addr, metadata, order);
                    }
                }
            },
            |_| {
                #[cfg(feature = "extreme_assertions")]
                sanity::verify_store(self, data_addr, metadata);
            },
        )
    }
609
610 pub unsafe fn set_zero(&self, data_addr: Address) {
620 use num_traits::Zero;
621 match self.log_num_of_bits {
622 0..=3 => self.store(data_addr, u8::zero()),
623 4 => self.store(data_addr, u16::zero()),
624 5 => self.store(data_addr, u32::zero()),
625 6 => self.store(data_addr, u64::zero()),
626 _ => unreachable!(),
627 }
628 }
629
630 pub fn set_zero_atomic(&self, data_addr: Address, order: Ordering) {
633 use num_traits::Zero;
634 match self.log_num_of_bits {
635 0..=3 => self.store_atomic(data_addr, u8::zero(), order),
636 4 => self.store_atomic(data_addr, u16::zero(), order),
637 5 => self.store_atomic(data_addr, u32::zero(), order),
638 6 => self.store_atomic(data_addr, u64::zero(), order),
639 _ => unreachable!(),
640 }
641 }
642
    /// Atomically set the whole metadata byte for `data_addr` to 0xff. Only
    /// valid for sub-byte specs. Under `extreme_assertions` this degrades to a
    /// checked per-value store (of 1) so the sanity shadow table stays accurate.
    ///
    /// # Safety
    /// Writes the entire byte, clobbering the metadata of the other regions
    /// that share it; the caller must ensure that is acceptable.
    pub unsafe fn set_raw_byte_atomic(&self, data_addr: Address, order: Ordering) {
        debug_assert!(self.log_num_of_bits < 3);
        cfg_if::cfg_if! {
            if #[cfg(feature = "extreme_assertions")] {
                self.store_atomic::<u8>(data_addr, 1, order)
            } else {
                self.side_metadata_access::<false, u8, _, _, _>(
                    data_addr,
                    Some(1u8),
                    || {
                        let meta_addr = address_to_meta_address(self, data_addr);
                        u8::store_atomic(meta_addr, 0xffu8, order);
                    },
                    |_| {}
                )
            }
        }
    }
672
    /// Non-atomically load the raw metadata byte containing `data_addr`'s bits,
    /// without extracting this region's bits. Only valid for sub-byte specs.
    ///
    /// # Safety
    /// Plain (non-atomic) load: the caller must ensure no concurrent writers.
    pub unsafe fn load_raw_byte(&self, data_addr: Address) -> u8 {
        debug_assert!(self.log_num_of_bits < 3);
        self.side_metadata_access::<false, u8, _, _, _>(
            data_addr,
            None,
            || {
                let meta_addr = address_to_meta_address(self, data_addr);
                meta_addr.load::<u8>()
            },
            |_| {},
        )
    }

    /// Non-atomically load the word-aligned metadata word containing
    /// `data_addr`'s metadata, without extracting any bits.
    ///
    /// # Safety
    /// Plain (non-atomic) load: the caller must ensure no concurrent writers.
    pub unsafe fn load_raw_word(&self, data_addr: Address) -> usize {
        use crate::util::constants::*;
        debug_assert!(self.log_num_of_bits < (LOG_BITS_IN_BYTE + LOG_BYTES_IN_ADDRESS) as usize);
        self.side_metadata_access::<false, usize, _, _, _>(
            data_addr,
            None,
            || {
                let meta_addr = address_to_meta_address(self, data_addr);
                // Align down so the whole word read is in-bounds and aligned.
                let aligned_meta_addr = meta_addr.align_down(BYTES_IN_ADDRESS);
                aligned_meta_addr.load::<usize>()
            },
            |_| {},
        )
    }
714
    /// Atomic compare-exchange on the metadata for `data_addr`. Returns
    /// `Ok(old)` on success, `Err(actual)` on failure, with the value narrowed
    /// to this spec's bits.
    ///
    /// For sub-byte specs the CAS operates on the whole byte: the expected byte
    /// is built from a fresh load of the neighboring bits, so the exchange can
    /// fail if *any* bit of the byte changes between that load and the CAS —
    /// callers should be prepared to retry.
    pub fn compare_exchange_atomic<T: MetadataValue>(
        &self,
        data_addr: Address,
        old_metadata: T,
        new_metadata: T,
        success_order: Ordering,
        failure_order: Ordering,
    ) -> std::result::Result<T, T> {
        self.side_metadata_access::<true, T, _, _, _>(
            data_addr,
            Some(new_metadata),
            || {
                let meta_addr = address_to_meta_address(self, data_addr);
                let bits_num_log = self.log_num_of_bits;
                if bits_num_log < 3 {
                    let lshift = meta_byte_lshift(self, data_addr);
                    let mask = meta_byte_mask(self) << lshift;

                    // Splice the expected old/new values into the current
                    // neighboring bits to form full-byte operands.
                    let real_old_byte = unsafe { meta_addr.atomic_load::<AtomicU8>(success_order) };
                    let expected_old_byte =
                        (real_old_byte & !mask) | ((old_metadata.to_u8().unwrap()) << lshift);
                    let expected_new_byte =
                        (expected_old_byte & !mask) | ((new_metadata.to_u8().unwrap()) << lshift);

                    unsafe {
                        meta_addr.compare_exchange::<AtomicU8>(
                            expected_old_byte,
                            expected_new_byte,
                            success_order,
                            failure_order,
                        )
                    }
                    // Narrow the returned byte back to this region's bits.
                    .map(|x| FromPrimitive::from_u8((x & mask) >> lshift).unwrap())
                    .map_err(|x| FromPrimitive::from_u8((x & mask) >> lshift).unwrap())
                } else {
                    unsafe {
                        T::compare_exchange(
                            meta_addr,
                            old_metadata,
                            new_metadata,
                            success_order,
                            failure_order,
                        )
                    }
                }
            },
            |_res| {
                #[cfg(feature = "extreme_assertions")]
                if _res.is_ok() {
                    sanity::verify_store(self, data_addr, new_metadata);
                }
            },
        )
    }
773
    /// Shared CAS-loop helper for sub-byte fetch-and-modify operations: applies
    /// `update` to this region's bits within the containing byte, leaving
    /// neighboring bits untouched, and returns the old (narrowed) value.
    fn fetch_ops_on_bits<F: Fn(u8) -> u8>(
        &self,
        data_addr: Address,
        meta_addr: Address,
        set_order: Ordering,
        fetch_order: Ordering,
        update: F,
    ) -> u8 {
        let lshift = meta_byte_lshift(self, data_addr);
        let mask = meta_byte_mask(self) << lshift;

        let old_raw_byte = unsafe {
            <u8 as MetadataValue>::fetch_update(
                meta_addr,
                set_order,
                fetch_order,
                |raw_byte: u8| {
                    let old_val = (raw_byte & mask) >> lshift;
                    let new_val = update(old_val);
                    // Re-mask the updated value so it cannot spill into neighbors.
                    let new_raw_byte = (raw_byte & !mask) | ((new_val << lshift) & mask);
                    Some(new_raw_byte)
                },
            )
        }
        // The closure always returns Some, so fetch_update cannot fail.
        .unwrap();
        (old_raw_byte & mask) >> lshift
    }
803
    /// Atomically add `val` to the metadata for `data_addr`, returning the old
    /// value. Sub-byte specs wrap within their bit width (via the mask in
    /// `fetch_ops_on_bits`).
    pub fn fetch_add_atomic<T: MetadataValue>(
        &self,
        data_addr: Address,
        val: T,
        order: Ordering,
    ) -> T {
        self.side_metadata_access::<true, T, _, _, _>(
            data_addr,
            Some(val),
            || {
                let meta_addr = address_to_meta_address(self, data_addr);
                let bits_num_log = self.log_num_of_bits;
                if bits_num_log < 3 {
                    FromPrimitive::from_u8(self.fetch_ops_on_bits(
                        data_addr,
                        meta_addr,
                        order,
                        order,
                        |x: u8| x.wrapping_add(val.to_u8().unwrap()),
                    ))
                    .unwrap()
                } else {
                    unsafe { T::fetch_add(meta_addr, val, order) }
                }
            },
            |_old_val| {
                #[cfg(feature = "extreme_assertions")]
                sanity::verify_update::<T>(self, data_addr, _old_val, _old_val.wrapping_add(&val))
            },
        )
    }

    /// Atomically subtract `val` from the metadata for `data_addr`, returning
    /// the old value. Sub-byte specs wrap within their bit width.
    pub fn fetch_sub_atomic<T: MetadataValue>(
        &self,
        data_addr: Address,
        val: T,
        order: Ordering,
    ) -> T {
        self.side_metadata_access::<true, T, _, _, _>(
            data_addr,
            Some(val),
            || {
                let meta_addr = address_to_meta_address(self, data_addr);
                if self.log_num_of_bits < 3 {
                    FromPrimitive::from_u8(self.fetch_ops_on_bits(
                        data_addr,
                        meta_addr,
                        order,
                        order,
                        |x: u8| x.wrapping_sub(val.to_u8().unwrap()),
                    ))
                    .unwrap()
                } else {
                    unsafe { T::fetch_sub(meta_addr, val, order) }
                }
            },
            |_old_val| {
                #[cfg(feature = "extreme_assertions")]
                sanity::verify_update::<T>(self, data_addr, _old_val, _old_val.wrapping_sub(&val))
            },
        )
    }
872
    /// Atomically AND the metadata for `data_addr` with `val`, returning the
    /// old value.
    pub fn fetch_and_atomic<T: MetadataValue>(
        &self,
        data_addr: Address,
        val: T,
        order: Ordering,
    ) -> T {
        self.side_metadata_access::<true, T, _, _, _>(
            data_addr,
            Some(val),
            || {
                let meta_addr = address_to_meta_address(self, data_addr);
                if self.log_num_of_bits < 3 {
                    let lshift = meta_byte_lshift(self, data_addr);
                    let mask = meta_byte_mask(self) << lshift;
                    // Bits outside the mask are 1 in the operand, so fetch_and
                    // leaves neighboring regions' bits unchanged.
                    let rhs = (val.to_u8().unwrap() << lshift) | !mask;
                    let old_raw_byte =
                        unsafe { <u8 as MetadataValue>::fetch_and(meta_addr, rhs, order) };
                    let old_val = (old_raw_byte & mask) >> lshift;
                    FromPrimitive::from_u8(old_val).unwrap()
                } else {
                    unsafe { T::fetch_and(meta_addr, val, order) }
                }
            },
            |_old_val| {
                #[cfg(feature = "extreme_assertions")]
                sanity::verify_update::<T>(self, data_addr, _old_val, _old_val.bitand(val))
            },
        )
    }

    /// Atomically OR the metadata for `data_addr` with `val`, returning the
    /// old value.
    pub fn fetch_or_atomic<T: MetadataValue>(
        &self,
        data_addr: Address,
        val: T,
        order: Ordering,
    ) -> T {
        self.side_metadata_access::<true, T, _, _, _>(
            data_addr,
            Some(val),
            || {
                let meta_addr = address_to_meta_address(self, data_addr);
                if self.log_num_of_bits < 3 {
                    let lshift = meta_byte_lshift(self, data_addr);
                    let mask = meta_byte_mask(self) << lshift;
                    // Bits outside the mask are 0 in the operand, so fetch_or
                    // leaves neighboring regions' bits unchanged.
                    let rhs = (val.to_u8().unwrap() << lshift) & mask;
                    let old_raw_byte =
                        unsafe { <u8 as MetadataValue>::fetch_or(meta_addr, rhs, order) };
                    let old_val = (old_raw_byte & mask) >> lshift;
                    FromPrimitive::from_u8(old_val).unwrap()
                } else {
                    unsafe { T::fetch_or(meta_addr, val, order) }
                }
            },
            |_old_val| {
                #[cfg(feature = "extreme_assertions")]
                sanity::verify_update::<T>(self, data_addr, _old_val, _old_val.bitor(val))
            },
        )
    }
940
    /// Atomically update the metadata for `data_addr` with closure `f`
    /// (CAS-loop semantics, like `AtomicUsize::fetch_update`). Returns
    /// `Ok(old)` if `f` returned `Some` and the update succeeded, `Err(old)`
    /// if `f` returned `None`.
    pub fn fetch_update_atomic<T: MetadataValue, F: FnMut(T) -> Option<T> + Copy>(
        &self,
        data_addr: Address,
        set_order: Ordering,
        fetch_order: Ordering,
        mut f: F,
    ) -> std::result::Result<T, T> {
        self.side_metadata_access::<true, T, _, _, _>(
            data_addr,
            None,
            move || -> std::result::Result<T, T> {
                let meta_addr = address_to_meta_address(self, data_addr);
                if self.log_num_of_bits < 3 {
                    let lshift = meta_byte_lshift(self, data_addr);
                    let mask = meta_byte_mask(self) << lshift;

                    unsafe {
                        <u8 as MetadataValue>::fetch_update(
                            meta_addr,
                            set_order,
                            fetch_order,
                            |raw_byte: u8| {
                                // Narrow to this region's bits, apply `f`, then
                                // splice the result back without touching neighbors.
                                let old_val = (raw_byte & mask) >> lshift;
                                f(FromPrimitive::from_u8(old_val).unwrap()).map(|new_val| {
                                    (raw_byte & !mask)
                                        | ((new_val.to_u8().unwrap() << lshift) & mask)
                                })
                            },
                        )
                    }
                    .map(|x| FromPrimitive::from_u8((x & mask) >> lshift).unwrap())
                    .map_err(|x| FromPrimitive::from_u8((x & mask) >> lshift).unwrap())
                } else {
                    unsafe { T::fetch_update(meta_addr, set_order, fetch_order, f) }
                }
            },
            |_result| {
                #[cfg(feature = "extreme_assertions")]
                if let Ok(old_val) = _result {
                    sanity::verify_update::<T>(self, data_addr, old_val, f(old_val).unwrap())
                }
            },
        )
    }
988
    /// Search backwards from `data_addr` (inclusive) for the nearest region
    /// whose metadata is non-zero, giving up after `search_limit_bytes` bytes.
    /// Uses the fast bit-range scan for contiguous metadata (cross-checked
    /// against the naive scan in debug builds) and the naive per-region scan
    /// otherwise.
    ///
    /// # Safety
    /// Performs non-atomic loads; the caller must ensure no concurrent writers.
    #[allow(clippy::let_and_return)]
    pub unsafe fn find_prev_non_zero_value<T: MetadataValue>(
        &self,
        data_addr: Address,
        search_limit_bytes: usize,
    ) -> Option<Address> {
        debug_assert!(search_limit_bytes > 0);

        if self.uses_contiguous_side_metadata() {
            let result = self.find_prev_non_zero_value_fast::<T>(data_addr, search_limit_bytes);
            #[cfg(debug_assertions)]
            {
                // In debug builds, verify the fast path against the naive one.
                let result2 =
                    self.find_prev_non_zero_value_simple::<T>(data_addr, search_limit_bytes);
                assert_eq!(result, result2, "find_prev_non_zero_value_fast returned a diffrent result from the naive implementation.");
            }
            result
        } else {
            warn!("We are trying to search non zero bits in an discontiguous side metadata. The performance is slow, as MMTk does not optimize for this case.");
            self.find_prev_non_zero_value_simple::<T>(data_addr, search_limit_bytes)
        }
    }
1029
1030 fn find_prev_non_zero_value_simple<T: MetadataValue>(
1031 &self,
1032 data_addr: Address,
1033 search_limit_bytes: usize,
1034 ) -> Option<Address> {
1035 let region_bytes = 1 << self.log_bytes_in_region;
1036 let start_addr = data_addr.align_down(region_bytes);
1038 let end_addr = data_addr.saturating_sub(search_limit_bytes) + 1usize;
1039
1040 let mut cursor = start_addr;
1041 while cursor >= end_addr {
1042 if !cursor.is_mapped() {
1044 return None;
1045 }
1046 if !unsafe { self.load::<T>(cursor).is_zero() } {
1048 return Some(cursor);
1049 }
1050 cursor -= region_bytes;
1051 }
1052 None
1053 }
1054
    /// Fast backwards search over contiguous metadata: first checks
    /// `data_addr` itself, then scans the metadata bit range
    /// `[data_addr - limit + 1, data_addr)` for the last set bit.
    #[allow(clippy::let_and_return)]
    fn find_prev_non_zero_value_fast<T: MetadataValue>(
        &self,
        data_addr: Address,
        search_limit_bytes: usize,
    ) -> Option<Address> {
        debug_assert!(self.uses_contiguous_side_metadata());

        if !data_addr.is_mapped() {
            return None;
        }
        // Fast path: the queried region itself is non-zero.
        if !unsafe { self.load::<T>(data_addr).is_zero() } {
            return Some(data_addr.align_down(1 << self.log_bytes_in_region));
        }

        // Remaining search window, excluding data_addr (already checked above).
        let start_addr = data_addr.saturating_sub(search_limit_bytes) + 1usize;
        let end_addr = data_addr;

        let start_meta_addr = address_to_contiguous_meta_address(self, start_addr);
        let start_meta_shift = meta_byte_lshift(self, start_addr);
        let end_meta_addr = address_to_contiguous_meta_address(self, end_addr);
        let end_meta_shift = meta_byte_lshift(self, end_addr);

        let mut res = None;

        // Visited in reverse order (break_bit_range called with forwards=false
        // below), so the first hit is the closest preceding non-zero bit.
        let mut visitor = |range: BitByteRange| {
            match range {
                BitByteRange::Bytes { start, end } => {
                    match helpers::find_last_non_zero_bit_in_metadata_bytes(start, end) {
                        helpers::FindMetaBitResult::Found { addr, bit } => {
                            let (addr, bit) = align_metadata_address(self, addr, bit);
                            res = Some(contiguous_meta_address_to_address(self, addr, bit));
                            true
                        }
                        // Unmapped metadata terminates the search.
                        helpers::FindMetaBitResult::UnmappedMetadata => true,
                        helpers::FindMetaBitResult::NotFound => false,
                    }
                }
                BitByteRange::BitsInByte {
                    addr,
                    bit_start,
                    bit_end,
                } => {
                    match helpers::find_last_non_zero_bit_in_metadata_bits(addr, bit_start, bit_end)
                    {
                        helpers::FindMetaBitResult::Found { addr, bit } => {
                            let (addr, bit) = align_metadata_address(self, addr, bit);
                            res = Some(contiguous_meta_address_to_address(self, addr, bit));
                            true
                        }
                        helpers::FindMetaBitResult::UnmappedMetadata => true,
                        helpers::FindMetaBitResult::NotFound => false,
                    }
                }
            }
        };

        ranges::break_bit_range(
            start_meta_addr,
            start_meta_shift,
            end_meta_addr,
            end_meta_shift,
            false,
            &mut visitor,
        );

        // Round down to the region boundary, and discard hits that fall
        // outside the searched window after rounding.
        res.map(|addr| addr.align_down(1 << self.log_bytes_in_region))
            .filter(|addr| *addr >= start_addr && *addr < end_addr)
    }
1140
    /// Call `visit_data` for every region in `[data_start_addr, data_end_addr)`
    /// whose metadata is non-zero. Uses the fast bit scan for contiguous 1-bit
    /// metadata; otherwise falls back to a slow per-region scan.
    pub fn scan_non_zero_values<T: MetadataValue>(
        &self,
        data_start_addr: Address,
        data_end_addr: Address,
        visit_data: &mut impl FnMut(Address),
    ) {
        if self.uses_contiguous_side_metadata() && self.log_num_of_bits == 0 {
            self.scan_non_zero_values_fast(data_start_addr, data_end_addr, visit_data);
        } else {
            warn!(
                "We are trying to search for non zero bits in a discontiguous side metadata \
                or the metadata has more than one bit per region. \
                The performance is slow, as MMTk does not optimize for this case."
            );
            self.scan_non_zero_values_simple::<T>(data_start_addr, data_end_addr, visit_data);
        }
    }
1175
1176 fn scan_non_zero_values_simple<T: MetadataValue>(
1177 &self,
1178 data_start_addr: Address,
1179 data_end_addr: Address,
1180 visit_data: &mut impl FnMut(Address),
1181 ) {
1182 let region_bytes = 1usize << self.log_bytes_in_region;
1183
1184 let mut cursor = data_start_addr;
1185 while cursor < data_end_addr {
1186 debug_assert!(cursor.is_mapped());
1187
1188 if !unsafe { self.load::<T>(cursor).is_zero() } {
1190 visit_data(cursor);
1191 }
1192 cursor += region_bytes;
1193 }
1194 }
1195
    /// Fast scan for contiguous 1-bit metadata: walks the metadata bit range
    /// directly and maps each set bit back to its data address.
    fn scan_non_zero_values_fast(
        &self,
        data_start_addr: Address,
        data_end_addr: Address,
        visit_data: &mut impl FnMut(Address),
    ) {
        debug_assert!(self.uses_contiguous_side_metadata());
        // One bit per region: each set bit corresponds to exactly one region.
        debug_assert_eq!(self.log_num_of_bits, 0);

        let start_meta_addr = address_to_contiguous_meta_address(self, data_start_addr);
        let start_meta_shift = meta_byte_lshift(self, data_start_addr);
        let end_meta_addr = address_to_contiguous_meta_address(self, data_end_addr);
        let end_meta_shift = meta_byte_lshift(self, data_end_addr);

        let mut visitor = |range| {
            match range {
                BitByteRange::Bytes { start, end } => {
                    helpers::scan_non_zero_bits_in_metadata_bytes(start, end, &mut |addr, bit| {
                        visit_data(helpers::contiguous_meta_address_to_address(self, addr, bit));
                    });
                }
                BitByteRange::BitsInByte {
                    addr,
                    bit_start,
                    bit_end,
                } => helpers::scan_non_zero_bits_in_metadata_bits(
                    addr,
                    bit_start,
                    bit_end,
                    &mut |addr, bit| {
                        visit_data(helpers::contiguous_meta_address_to_address(self, addr, bit));
                    },
                ),
            }
            // Never stop early: visit the whole range.
            false
        };

        ranges::break_bit_range(
            start_meta_addr,
            start_meta_shift,
            end_meta_addr,
            end_meta_shift,
            true,
            &mut visitor,
        );
    }
1243}
1244
impl fmt::Debug for SideMetadataSpec {
    /// Manual Debug impl: the offset is a union, so the active field must be
    /// selected via `is_absolute_offset` before formatting.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_fmt(format_args!(
            "SideMetadataSpec {} {{ \
            **is_global: {:?} \
            **offset: {} \
            **log_num_of_bits: 0x{:x} \
            **log_bytes_in_region: 0x{:x} \
            }}",
            self.name,
            self.is_global,
            unsafe {
                if self.is_absolute_offset() {
                    format!("0x{:x}", self.offset.addr)
                } else {
                    format!("0x{:x}", self.offset.rel_offset)
                }
            },
            self.log_num_of_bits,
            self.log_bytes_in_region
        ))
    }
}
1268
/// The offset of a side metadata table: either an absolute address (contiguous
/// metadata) or a per-chunk relative offset (chunked metadata). Which variant
/// is active is determined by the owning spec's `is_absolute_offset`.
#[derive(Clone, Copy)]
pub union SideMetadataOffset {
    addr: Address,
    rel_offset: usize,
}
1277
impl SideMetadataOffset {
    /// Constructs an absolute-address offset (for contiguous metadata).
    pub const fn addr(addr: Address) -> Self {
        SideMetadataOffset { addr }
    }

    /// Constructs a per-chunk relative offset (for chunked metadata).
    pub const fn rel(rel_offset: usize) -> Self {
        SideMetadataOffset { rel_offset }
    }

    /// Returns an offset laid out immediately after `spec`, word-aligned, so
    /// consecutive specs can be packed without overlapping.
    pub const fn layout_after(spec: &SideMetadataSpec) -> SideMetadataOffset {
        let upper_bound_offset = spec.upper_bound_offset();
        if spec.is_absolute_offset() {
            // `is_absolute_offset` guarantees `addr` is the active field.
            let addr = unsafe { upper_bound_offset.addr };
            let aligned_addr = addr.align_up(BYTES_IN_WORD);
            SideMetadataOffset::addr(aligned_addr)
        } else {
            let rel_offset = unsafe { upper_bound_offset.rel_offset };
            let aligned_rel_offset = raw_align_up(rel_offset, BYTES_IN_WORD);
            SideMetadataOffset::rel(aligned_rel_offset)
        }
    }
}
1311
impl PartialEq for SideMetadataOffset {
    /// Compares the raw bits of the union. Both variants are one word, so
    /// reading through `rel_offset` compares either variant correctly.
    fn eq(&self, other: &Self) -> bool {
        unsafe { self.rel_offset == other.rel_offset }
    }
}
impl Eq for SideMetadataOffset {}
1320
impl std::hash::Hash for SideMetadataOffset {
    /// Hashes the raw bits of the union, consistent with the PartialEq impl.
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        unsafe { self.rel_offset }.hash(state);
    }
}
1326
/// The set of side metadata specs used by one space: the globally shared specs
/// plus the specs local to the owning policy.
pub(crate) struct SideMetadataContext {
    /// Global specs, shared by all spaces.
    pub global: Vec<SideMetadataSpec>,
    /// Specs local to the owning policy/space.
    pub local: Vec<SideMetadataSpec>,
}
1335
1336impl SideMetadataContext {
    /// Builds the list of global specs: the built-in ones that are always
    /// present (VO bit when enabled, the SFT map's global spec if any, and the
    /// chunk-map alloc table), followed by the caller-provided `specs`.
    #[allow(clippy::vec_init_then_push)]
    pub fn new_global_specs(specs: &[SideMetadataSpec]) -> Vec<SideMetadataSpec> {
        let mut ret = vec![];

        #[cfg(feature = "vo_bit")]
        ret.push(VO_BIT_SIDE_METADATA_SPEC);

        if let Some(spec) = crate::mmtk::SFT_MAP.get_side_metadata() {
            if spec.is_global {
                ret.push(*spec);
            }
        }

        ret.push(crate::util::heap::chunk_map::ChunkMap::ALLOC_TABLE);

        ret.extend_from_slice(specs);
        ret
    }
1358
1359 pub fn get_local_specs(&self) -> &[SideMetadataSpec] {
1360 &self.local
1361 }
1362
1363 pub fn calculate_reserved_pages(&self, data_pages: usize) -> usize {
1370 let mut total = 0;
1371 for spec in self.global.iter() {
1372 total += data_to_meta_size_round_up(spec, data_pages);
1376 }
1377 for spec in self.local.iter() {
1378 total += data_to_meta_size_round_up(spec, data_pages);
1379 }
1380 total
1381 }
1382
1383 pub fn try_map_metadata_space(
1389 &self,
1390 start: Address,
1391 size: usize,
1392 space_name: &str,
1393 ) -> MmapResult<()> {
1394 debug!(
1395 "try_map_metadata_space({}, 0x{:x}, {}, {})",
1396 start,
1397 size,
1398 self.global.len(),
1399 self.local.len()
1400 );
1401 debug_assert!(start.is_aligned_to(BYTES_IN_PAGE));
1403 debug_assert!(size % BYTES_IN_PAGE == 0);
1404 self.map_metadata_internal(start, size, false, space_name)
1405 }
1406
1407 pub fn try_map_metadata_address_range(
1412 &self,
1413 start: Address,
1414 size: usize,
1415 name: &str,
1416 ) -> MmapResult<()> {
1417 debug!(
1418 "try_map_metadata_address_range({}, 0x{:x}, {}, {})",
1419 start,
1420 size,
1421 self.global.len(),
1422 self.local.len()
1423 );
1424 debug_assert!(start.is_aligned_to(BYTES_IN_CHUNK));
1426 debug_assert!(size % BYTES_IN_CHUNK == 0);
1427 self.map_metadata_internal(start, size, true, name)
1428 }
1429
1430 fn map_metadata_internal(
1438 &self,
1439 start: Address,
1440 size: usize,
1441 no_reserve: bool,
1442 space_name: &str,
1443 ) -> MmapResult<()> {
1444 for spec in self.global.iter() {
1445 let anno = MmapAnnotation::SideMeta {
1446 space: space_name,
1447 meta: spec.name,
1448 };
1449 try_mmap_contiguous_metadata_space(start, size, spec, no_reserve, &anno)?;
1450 }
1451
1452 #[cfg(target_pointer_width = "32")]
1453 let mut lsize: usize = 0;
1454
1455 for spec in self.local.iter() {
1456 #[cfg(target_pointer_width = "64")]
1468 {
1469 let anno = MmapAnnotation::SideMeta {
1470 space: space_name,
1471 meta: spec.name,
1472 };
1473 try_mmap_contiguous_metadata_space(start, size, spec, no_reserve, &anno)?;
1474 }
1475 #[cfg(target_pointer_width = "32")]
1476 {
1477 lsize += metadata_bytes_per_chunk(spec.log_bytes_in_region, spec.log_num_of_bits);
1478 }
1479 }
1480
1481 #[cfg(target_pointer_width = "32")]
1482 if lsize > 0 {
1483 let max = BYTES_IN_CHUNK >> super::constants::LOG_LOCAL_SIDE_METADATA_WORST_CASE_RATIO;
1484 debug_assert!(
1485 lsize <= max,
1486 "local side metadata per chunk (0x{:x}) must be less than (0x{:x})",
1487 lsize,
1488 max
1489 );
1490 let anno = MmapAnnotation::SideMeta {
1493 space: space_name,
1494 meta: "all",
1495 };
1496 try_map_per_chunk_metadata_space(start, size, lsize, no_reserve, &anno)?;
1497 }
1498
1499 Ok(())
1500 }
1501
1502 #[cfg(test)]
1510 pub fn ensure_unmap_metadata_space(&self, start: Address, size: usize) {
1511 trace!("ensure_unmap_metadata_space({}, 0x{:x})", start, size);
1512 debug_assert!(start.is_aligned_to(BYTES_IN_PAGE));
1513 debug_assert!(size % BYTES_IN_PAGE == 0);
1514
1515 for spec in self.global.iter() {
1516 ensure_munmap_contiguous_metadata_space(start, size, spec);
1517 }
1518
1519 for spec in self.local.iter() {
1520 #[cfg(target_pointer_width = "64")]
1521 {
1522 ensure_munmap_contiguous_metadata_space(start, size, spec);
1523 }
1524 #[cfg(target_pointer_width = "32")]
1525 {
1526 ensure_munmap_chunked_metadata_space(start, size, spec);
1527 }
1528 }
1529 }
1530}
1531
/// A read-only view of a byte-granularity side metadata table, exposed as a
/// fixed-size array of `ENTRIES` bytes (one byte per data region).
pub struct MetadataByteArrayRef<const ENTRIES: usize> {
    // Retained only so loads can be cross-checked against the sanity table.
    #[cfg(feature = "extreme_assertions")]
    heap_range_start: Address,
    #[cfg(feature = "extreme_assertions")]
    spec: SideMetadataSpec,
    // The underlying metadata bytes.
    data: &'static [u8; ENTRIES],
}
1540
impl<const ENTRIES: usize> MetadataByteArrayRef<ENTRIES> {
    /// Create a view over the metadata covering the heap range
    /// `[start, start + bytes)`. The spec must use one byte of metadata per
    /// data region, and the range must contain exactly `ENTRIES` regions.
    pub fn new(metadata_spec: &SideMetadataSpec, start: Address, bytes: usize) -> Self {
        debug_assert_eq!(
            metadata_spec.log_num_of_bits, LOG_BITS_IN_BYTE as usize,
            "Each heap entry should map to a byte in side-metadata"
        );
        debug_assert_eq!(
            bytes >> metadata_spec.log_bytes_in_region,
            ENTRIES,
            "Heap range size and MetadataByteArray size does not match"
        );
        Self {
            #[cfg(feature = "extreme_assertions")]
            heap_range_start: start,
            #[cfg(feature = "extreme_assertions")]
            spec: *metadata_spec,
            // NOTE(review): assumes the metadata for this range is already
            // mapped and remains valid for 'static — confirm at call sites.
            data: unsafe { &*address_to_meta_address(metadata_spec, start).to_ptr() },
        }
    }

    /// The number of entries (metadata bytes) in the array.
    #[allow(clippy::len_without_is_empty)]
    pub const fn len(&self) -> usize {
        ENTRIES
    }

    /// Get the metadata byte at `index`. When `extreme_assertions` is
    /// enabled, the load is additionally verified against the sanity table
    /// under the global sanity lock.
    #[allow(clippy::let_and_return)]
    pub fn get(&self, index: usize) -> u8 {
        #[cfg(feature = "extreme_assertions")]
        let _lock = sanity::SANITY_LOCK.lock().unwrap();
        let value = self.data[index];
        #[cfg(feature = "extreme_assertions")]
        {
            // Map the metadata index back to the data address it describes.
            let data_addr = self.heap_range_start + (index << self.spec.log_bytes_in_region);
            sanity::verify_load::<u8>(&self.spec, data_addr, value);
        }
        value
    }
}
1591
#[cfg(test)]
mod tests {
    use super::*;
    use crate::mmap_anno_test;
    use crate::util::metadata::side_metadata::SideMetadataContext;

    // A zero offset for test specs. Which union variant is written does not
    // matter here, since both variants are word-sized.
    pub const ZERO_OFFSET: SideMetadataOffset = SideMetadataOffset { rel_offset: 0 };

    #[test]
    fn calculate_reserved_pages_one_spec() {
        // 1 bit of metadata per 8-byte region: a 1:64 metadata-to-data ratio,
        // so 1024 data pages need 16 metadata pages (rounded up per spec).
        let spec = SideMetadataSpec {
            name: "test_spec",
            is_global: true,
            offset: ZERO_OFFSET,
            log_num_of_bits: 0,
            log_bytes_in_region: 3,
        };
        let side_metadata = SideMetadataContext {
            global: vec![spec],
            local: vec![],
        };
        assert_eq!(side_metadata.calculate_reserved_pages(0), 0);
        assert_eq!(side_metadata.calculate_reserved_pages(63), 1);
        assert_eq!(side_metadata.calculate_reserved_pages(64), 1);
        assert_eq!(side_metadata.calculate_reserved_pages(65), 2);
        assert_eq!(side_metadata.calculate_reserved_pages(1024), 16);
    }

    #[test]
    fn calculate_reserved_pages_multi_specs() {
        // Global spec: 1 bit per 8 bytes -> 16 pages per 1024 data pages.
        let gspec = SideMetadataSpec {
            name: "gspec",
            is_global: true,
            offset: ZERO_OFFSET,
            log_num_of_bits: 0,
            log_bytes_in_region: 3,
        };
        // Local spec: 2 bits per 4096-byte region -> rounds up to 1 page.
        let lspec = SideMetadataSpec {
            name: "lspec",
            is_global: false,
            offset: ZERO_OFFSET,
            log_num_of_bits: 1,
            log_bytes_in_region: 12,
        };
        let side_metadata = SideMetadataContext {
            global: vec![gspec],
            local: vec![lspec],
        };
        assert_eq!(side_metadata.calculate_reserved_pages(1024), 16 + 1);
    }

    use crate::util::heap::layout::vm_layout;
    use crate::util::test_util::{serial_test, with_cleanup};
    use paste::paste;

    const TEST_LOG_BYTES_IN_REGION: usize = 12;

    // Test harness: create one global spec with `log_bits` bits per region,
    // map a page of data and its metadata, run `f(spec, data_addr,
    // meta_addr)`, then zero the touched metadata and reset the sanity
    // checker. Runs under `serial_test` because the tests share the global
    // metadata address range.
    fn test_side_metadata(
        log_bits: usize,
        f: impl Fn(&SideMetadataSpec, Address, Address) + std::panic::RefUnwindSafe,
    ) {
        serial_test(|| {
            let spec = SideMetadataSpec {
                name: "Test Spec $tname",
                is_global: true,
                offset: SideMetadataOffset::addr(GLOBAL_SIDE_METADATA_BASE_ADDRESS),
                log_num_of_bits: log_bits,
                log_bytes_in_region: TEST_LOG_BYTES_IN_REGION,
            };
            let context = SideMetadataContext {
                global: vec![spec],
                local: vec![],
            };
            let mut sanity = SideMetadataSanity::new();
            sanity.verify_metadata_context("TestPolicy", &context);

            let data_addr = vm_layout::vm_layout().heap_start;
            crate::MMAPPER
                .ensure_mapped(
                    data_addr,
                    1,
                    HugePageSupport::No,
                    MmapProtection::ReadWrite,
                    mmap_anno_test!(),
                )
                .unwrap();
            let meta_addr = address_to_meta_address(&spec, data_addr);
            with_cleanup(
                || {
                    let mmap_result =
                        context.try_map_metadata_space(data_addr, BYTES_IN_PAGE, "test_space");
                    assert!(mmap_result.is_ok(), "{:?}", mmap_result);

                    f(&spec, data_addr, meta_addr);
                },
                || {
                    // Cleanup: with log_bits <= 6, a test touches at most one
                    // u64 of metadata, so zeroing one word is sufficient.
                    assert!(log_bits <= 6);
                    let meta_ptr: *mut u64 = meta_addr.to_mut_ptr();
                    unsafe { *meta_ptr = 0 };

                    sanity::reset();
                },
            )
        })
    }

    // The largest value a `1 << log_bits`-bit metadata field can hold: a run
    // of `1 << log_bits` one-bits.
    fn max_value(log_bits: usize) -> u64 {
        (0..(1 << log_bits)).fold(0, |accum, x| accum + (1 << x))
    }
    #[test]
    fn test_max_value() {
        assert_eq!(max_value(0), 1);
        assert_eq!(max_value(1), 0b11);
        assert_eq!(max_value(2), 0b1111);
        assert_eq!(max_value(3), 255);
        assert_eq!(max_value(4), 65535);
    }

    // Generate the full battery of load/store/RMW/search tests for one
    // metadata granularity ($log_bits bits per region) accessed via $type.
    macro_rules! test_side_metadata_access {
        ($tname: ident, $type: ty, $log_bits: expr) => {
            paste!{
                #[test]
                fn [<$tname _load>]() {
                    test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
                        let meta_ptr: *mut $type = meta_addr.to_mut_ptr();

                        assert_eq!(unsafe { spec.load::<$type>(data_addr) }, 0);
                        assert_eq!(spec.load_atomic::<$type>(data_addr, Ordering::SeqCst), 0);

                        let max_value: $type = max_value($log_bits) as _;
                        unsafe { spec.store::<$type>(data_addr, max_value); }
                        assert_eq!(unsafe { spec.load::<$type>(data_addr) }, max_value);
                        assert_eq!(spec.load_atomic::<$type>(data_addr, Ordering::SeqCst), max_value);
                        assert_eq!(unsafe { *meta_ptr }, max_value);
                    });
                }

                #[test]
                fn [<$tname _store>]() {
                    test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
                        let meta_ptr: *mut $type = meta_addr.to_mut_ptr();
                        let max_value: $type = max_value($log_bits) as _;

                        // Storing 0 must clear only this region's bits.
                        unsafe { *meta_ptr = <$type>::MAX; }
                        unsafe { spec.store::<$type>(data_addr, 0); }
                        assert_eq!(unsafe { spec.load::<$type>(data_addr) }, 0);
                        assert_eq!(unsafe { *meta_ptr }, <$type>::MAX & (!max_value));
                    });
                }

                #[test]
                fn [<$tname _atomic_store>]() {
                    test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
                        let meta_ptr: *mut $type = meta_addr.to_mut_ptr();
                        let max_value: $type = max_value($log_bits) as _;

                        unsafe { *meta_ptr = <$type>::MAX; }
                        spec.store_atomic::<$type>(data_addr, 0, Ordering::SeqCst);
                        assert_eq!(unsafe { spec.load::<$type>(data_addr) }, 0);
                        assert_eq!(unsafe { *meta_ptr }, <$type>::MAX & (!max_value));
                    });
                }

                #[test]
                fn [<$tname _compare_exchange_success>]() {
                    test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
                        let meta_ptr: *mut $type = meta_addr.to_mut_ptr();
                        let max_value: $type = max_value($log_bits) as _;
                        unsafe { *meta_ptr = <$type>::MAX; }
                        spec.store_atomic::<$type>(data_addr, 1, Ordering::SeqCst);

                        let old_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
                        assert_eq!(old_val, 1);

                        let new_val = 0;
                        let res = spec.compare_exchange_atomic::<$type>(data_addr, old_val, new_val, Ordering::SeqCst, Ordering::SeqCst);
                        assert!(res.is_ok());
                        assert_eq!(res.unwrap(), old_val, "old vals do not match");

                        let after_update = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
                        assert_eq!(after_update, new_val);
                        assert_eq!(unsafe { *meta_ptr }, <$type>::MAX & (!max_value));
                    });
                }

                #[test]
                fn [<$tname _compare_exchange_fail>]() {
                    test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
                        let meta_ptr: *mut $type = meta_addr.to_mut_ptr();
                        unsafe { *meta_ptr = <$type>::MAX; }
                        spec.store_atomic::<$type>(data_addr, 1, Ordering::SeqCst);

                        let old_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
                        assert_eq!(old_val, 1);

                        // Change the value behind the CAS's back so it fails.
                        spec.store_atomic::<$type>(data_addr, 0, Ordering::SeqCst);
                        let bits_before_cas = unsafe { *meta_ptr };

                        let new_val = 0;
                        let res = spec.compare_exchange_atomic::<$type>(data_addr, old_val, new_val, Ordering::SeqCst, Ordering::SeqCst);
                        assert!(res.is_err());
                        assert_eq!(res.err().unwrap(), 0);
                        let bits_after_cas = unsafe { *meta_ptr };
                        assert_eq!(bits_before_cas, bits_after_cas);
                    });
                }

                #[test]
                fn [<$tname _fetch_add_1>]() {
                    test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
                        let meta_ptr: *mut $type = meta_addr.to_mut_ptr();
                        unsafe { *meta_ptr = <$type>::MAX; }
                        spec.store_atomic::<$type>(data_addr, 0, Ordering::SeqCst);

                        let old_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);

                        let old_val_from_fetch = spec.fetch_add_atomic::<$type>(data_addr, 1, Ordering::SeqCst);
                        assert_eq!(old_val_from_fetch, old_val);

                        let new_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
                        assert_eq!(new_val, 1);
                    });
                }

                #[test]
                fn [<$tname _fetch_add_max>]() {
                    test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
                        let meta_ptr: *mut $type = meta_addr.to_mut_ptr();
                        let max_value: $type = max_value($log_bits) as _;
                        unsafe { *meta_ptr = <$type>::MAX; }
                        spec.store_atomic::<$type>(data_addr, 0, Ordering::SeqCst);

                        let old_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);

                        let old_val_from_fetch = spec.fetch_add_atomic::<$type>(data_addr, max_value, Ordering::SeqCst);
                        assert_eq!(old_val_from_fetch, old_val);

                        let new_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
                        assert_eq!(new_val, max_value);
                    });
                }

                #[test]
                fn [<$tname _fetch_add_overflow>]() {
                    test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
                        let meta_ptr: *mut $type = meta_addr.to_mut_ptr();
                        let max_value: $type = max_value($log_bits) as _;
                        unsafe { *meta_ptr = <$type>::MAX; }
                        spec.store_atomic::<$type>(data_addr, max_value, Ordering::SeqCst);

                        let old_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);

                        // Adding 1 to the field's max wraps the field to 0.
                        let old_val_from_fetch = spec.fetch_add_atomic::<$type>(data_addr, 1, Ordering::SeqCst);
                        assert_eq!(old_val_from_fetch, old_val);

                        let new_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
                        assert_eq!(new_val, 0);
                    });
                }

                #[test]
                fn [<$tname _fetch_sub_1>]() {
                    test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
                        let meta_ptr: *mut $type = meta_addr.to_mut_ptr();
                        unsafe { *meta_ptr = <$type>::MAX; }
                        spec.store_atomic::<$type>(data_addr, 1, Ordering::SeqCst);

                        let old_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);

                        let old_val_from_fetch = spec.fetch_sub_atomic::<$type>(data_addr, 1, Ordering::SeqCst);
                        assert_eq!(old_val_from_fetch, old_val);

                        let new_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
                        assert_eq!(new_val, 0);
                    });
                }

                #[test]
                fn [<$tname _fetch_sub_max>]() {
                    test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
                        let meta_ptr: *mut $type = meta_addr.to_mut_ptr();
                        let max_value: $type = max_value($log_bits) as _;
                        unsafe { *meta_ptr = <$type>::MAX; }
                        spec.store_atomic::<$type>(data_addr, max_value, Ordering::SeqCst);

                        let old_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);

                        let old_val_from_fetch = spec.fetch_sub_atomic::<$type>(data_addr, max_value, Ordering::SeqCst);
                        assert_eq!(old_val_from_fetch, old_val);

                        let new_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
                        assert_eq!(new_val, 0);
                    });
                }

                #[test]
                fn [<$tname _fetch_sub_overflow>]() {
                    test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
                        let meta_ptr: *mut $type = meta_addr.to_mut_ptr();
                        let max_value: $type = max_value($log_bits) as _;
                        unsafe { *meta_ptr = <$type>::MAX; }
                        spec.store_atomic::<$type>(data_addr, 0, Ordering::SeqCst);

                        let old_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);

                        // Subtracting 1 from 0 wraps the field to its max.
                        let old_val_from_fetch = spec.fetch_sub_atomic::<$type>(data_addr, 1, Ordering::SeqCst);
                        assert_eq!(old_val_from_fetch, old_val);

                        let new_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
                        assert_eq!(new_val, max_value);
                    });
                }

                #[test]
                fn [<$tname _fetch_and>]() {
                    test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
                        let meta_ptr: *mut $type = meta_addr.to_mut_ptr();
                        let max_value: $type = max_value($log_bits) as _;
                        unsafe { *meta_ptr = <$type>::MAX; }
                        spec.store_atomic::<$type>(data_addr, max_value, Ordering::SeqCst);

                        // AND with all ones: no change.
                        let old_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
                        let old_val_from_fetch = spec.fetch_and_atomic::<$type>(data_addr, max_value, Ordering::SeqCst);
                        assert_eq!(old_val_from_fetch, old_val, "old values do not match");
                        assert_eq!(spec.load_atomic::<$type>(data_addr, Ordering::SeqCst), max_value, "load values do not match");
                        assert_eq!(unsafe { *meta_ptr }, <$type>::MAX, "raw values do not match");

                        // AND with the lowest bit cleared: only that bit drops.
                        let last_bit_zero = max_value - 1;
                        let old_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
                        let old_val_from_fetch = spec.fetch_and_atomic::<$type>(data_addr, last_bit_zero, Ordering::SeqCst);
                        assert_eq!(old_val_from_fetch, old_val);
                        assert_eq!(spec.load_atomic::<$type>(data_addr, Ordering::SeqCst), last_bit_zero);
                        assert_eq!(unsafe { *meta_ptr }, <$type>::MAX - 1);
                    });
                }

                #[test]
                fn [<$tname _fetch_or>]() {
                    test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
                        let meta_ptr: *mut $type = meta_addr.to_mut_ptr();
                        let max_value: $type = max_value($log_bits) as _;
                        unsafe { *meta_ptr = 0; }
                        spec.store_atomic::<$type>(data_addr, 0, Ordering::SeqCst);

                        // OR with zero: no change.
                        let old_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
                        let old_val_from_fetch = spec.fetch_or_atomic::<$type>(data_addr, 0, Ordering::SeqCst);
                        assert_eq!(old_val_from_fetch, old_val);
                        assert_eq!(spec.load_atomic::<$type>(data_addr, Ordering::SeqCst), 0);
                        assert_eq!(unsafe { *meta_ptr }, 0);

                        // OR with all ones: the whole field is set.
                        let old_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
                        let old_val_from_fetch = spec.fetch_or_atomic::<$type>(data_addr, max_value, Ordering::SeqCst);
                        assert_eq!(old_val_from_fetch, old_val);
                        assert_eq!(spec.load_atomic::<$type>(data_addr, Ordering::SeqCst), max_value);
                        assert_eq!(unsafe { *meta_ptr }, max_value);
                    });
                }

                #[test]
                fn [<$tname _fetch_update_success>]() {
                    test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
                        let meta_ptr: *mut $type = meta_addr.to_mut_ptr();
                        let max_value: $type = max_value($log_bits) as _;
                        unsafe { *meta_ptr = <$type>::MAX; }
                        spec.store_atomic::<$type>(data_addr, max_value, Ordering::SeqCst);

                        let old_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
                        let fetch_res = spec.fetch_update_atomic::<$type, _>(data_addr, Ordering::SeqCst, Ordering::SeqCst, |_x: $type| Some(0));
                        assert!(fetch_res.is_ok());
                        assert_eq!(fetch_res.unwrap(), old_val);
                        assert_eq!(spec.load_atomic::<$type>(data_addr, Ordering::SeqCst), 0);
                        assert_eq!(unsafe { *meta_ptr }, <$type>::MAX & (!max_value));
                    });
                }

                #[test]
                fn [<$tname _fetch_update_fail>]() {
                    test_side_metadata($log_bits, |spec, data_addr, meta_addr| {
                        let meta_ptr: *mut $type = meta_addr.to_mut_ptr();
                        let max_value: $type = max_value($log_bits) as _;
                        unsafe { *meta_ptr = <$type>::MAX; }
                        spec.store_atomic::<$type>(data_addr, max_value, Ordering::SeqCst);

                        let old_val = spec.load_atomic::<$type>(data_addr, Ordering::SeqCst);
                        let fetch_res = spec.fetch_update_atomic::<$type, _>(data_addr, Ordering::SeqCst, Ordering::SeqCst, |_x: $type| None);
                        assert!(fetch_res.is_err());
                        assert_eq!(fetch_res.err().unwrap(), old_val);
                        assert_eq!(spec.load_atomic::<$type>(data_addr, Ordering::SeqCst), max_value);
                        assert_eq!(unsafe { *meta_ptr }, <$type>::MAX);
                    });
                }

                #[test]
                fn [<$tname _find_prev_non_zero_value_easy>]() {
                    test_side_metadata($log_bits, |spec, data_addr, _meta_addr| {
                        let max_value: $type = max_value($log_bits) as _;
                        spec.store_atomic::<$type>(data_addr, max_value, Ordering::SeqCst);

                        let res_addr = unsafe { spec.find_prev_non_zero_value::<$type>(data_addr, 8) };
                        assert!(res_addr.is_some());
                        assert_eq!(res_addr.unwrap(), data_addr);
                    });
                }

                #[test]
                fn [<$tname _find_prev_non_zero_value_arbitrary_bytes>]() {
                    test_side_metadata($log_bits, |spec, data_addr, _meta_addr| {
                        let max_value: $type = max_value($log_bits) as _;
                        spec.store_atomic::<$type>(data_addr, max_value, Ordering::SeqCst);

                        // Searching back from any address within a few regions
                        // should land on the marked region start.
                        let test_region = (1 << TEST_LOG_BYTES_IN_REGION);
                        for len in 1..(test_region*4) {
                            let start_addr = data_addr + len;
                            let res_addr = unsafe { spec.find_prev_non_zero_value::<$type>(start_addr, len + 1) };
                            assert!(res_addr.is_some());
                            assert_eq!(res_addr.unwrap(), data_addr);
                        }
                    });
                }

                #[test]
                fn [<$tname _find_prev_non_zero_value_arbitrary_start>]() {
                    test_side_metadata($log_bits, |spec, data_addr, _meta_addr| {
                        let max_value: $type = max_value($log_bits) as _;

                        // Any address inside the region maps to the same
                        // metadata; the search result is the region start.
                        for offset in 0..7usize {
                            let test_data_addr = data_addr + offset;
                            spec.store_atomic::<$type>(test_data_addr, max_value, Ordering::SeqCst);

                            let res_addr = unsafe { spec.find_prev_non_zero_value::<$type>(test_data_addr, 4096) };
                            assert!(res_addr.is_some());
                            assert_eq!(res_addr.unwrap(), data_addr);

                            spec.store_atomic::<$type>(test_data_addr, 0, Ordering::SeqCst);
                        }
                    });
                }

                #[test]
                fn [<$tname _find_prev_non_zero_value_no_find>]() {
                    test_side_metadata($log_bits, |spec, data_addr, _meta_addr| {
                        spec.store_atomic::<$type>(data_addr, 0, Ordering::SeqCst);

                        let test_region = (1 << TEST_LOG_BYTES_IN_REGION);
                        for len in 1..(test_region*4) {
                            let start_addr = data_addr + len;
                            let res_addr = unsafe { spec.find_prev_non_zero_value::<$type>(start_addr, len + 1) };
                            assert!(res_addr.is_none());
                        }
                    });
                }
            }
        }
    }

    test_side_metadata_access!(test_u1, u8, 0);
    test_side_metadata_access!(test_u2, u8, 1);
    test_side_metadata_access!(test_u4, u8, 2);
    test_side_metadata_access!(test_u8, u8, 3);
    test_side_metadata_access!(test_u16, u16, 4);
    test_side_metadata_access!(test_u32, u32, 5);
    test_side_metadata_access!(test_u64, u64, 6);
    test_side_metadata_access!(
        test_usize,
        usize,
        if cfg!(target_pointer_width = "64") {
            6
        } else if cfg!(target_pointer_width = "32") {
            5
        } else {
            unreachable!()
        }
    );

    #[test]
    fn test_bulk_update_meta_bits() {
        // Operates on a raw 8-byte buffer; arguments are (start_addr,
        // start_bit, end_addr, end_bit). Buffer is intentionally leaked —
        // this is a short-lived test.
        let raw_mem =
            unsafe { std::alloc::alloc_zeroed(std::alloc::Layout::from_size_align(8, 8).unwrap()) };
        let addr = Address::from_mut_ptr(raw_mem);

        SideMetadataSpec::set_meta_bits(addr, 0, addr, 4);
        assert_eq!(unsafe { addr.load::<u64>() }, 0b1111);

        SideMetadataSpec::zero_meta_bits(addr, 1, addr, 3);
        assert_eq!(unsafe { addr.load::<u64>() }, 0b1001);

        SideMetadataSpec::set_meta_bits(addr, 2, addr, 6);
        assert_eq!(unsafe { addr.load::<u64>() }, 0b0011_1101);

        SideMetadataSpec::zero_meta_bits(addr, 0, addr + 1usize, 0);
        assert_eq!(unsafe { addr.load::<u64>() }, 0b0);

        SideMetadataSpec::set_meta_bits(addr, 2, addr + 1usize, 2);
        assert_eq!(unsafe { addr.load::<u64>() }, 0b11_1111_1100);

        SideMetadataSpec::set_meta_bits(addr, 0, addr + 1usize, 2);
        assert_eq!(unsafe { addr.load::<u64>() }, 0b11_1111_1111);
    }
}