mmtk/util/metadata/side_metadata/helpers.rs

use super::ranges::BitOffset;
use super::SideMetadataSpec;
use crate::util::constants::LOG_BYTES_IN_PAGE;
use crate::util::constants::{BITS_IN_WORD, BYTES_IN_PAGE, LOG_BITS_IN_BYTE};
use crate::util::conversions::rshift_align_up;
use crate::util::heap::layout::vm_layout::VMLayout;
use crate::util::memory::{MmapAnnotation, MmapStrategy};
#[cfg(target_pointer_width = "32")]
use crate::util::metadata::side_metadata::address_to_chunked_meta_address;
use crate::util::Address;
use crate::MMAPPER;
use std::io::Result;

/// Performs address translation in contiguous metadata spaces (e.g. global and policy-specific metadata on 64-bit targets, and global metadata on 32-bit targets).
pub(super) fn address_to_contiguous_meta_address(
    metadata_spec: &SideMetadataSpec,
    data_addr: Address,
) -> Address {
    let log_bits_num = metadata_spec.log_num_of_bits as i32;
    let log_bytes_in_region = metadata_spec.log_bytes_in_region;

    let shift = (LOG_BITS_IN_BYTE as i32) - log_bits_num;

    if shift >= 0 {
        metadata_spec.get_absolute_offset() + ((data_addr >> log_bytes_in_region) >> shift)
    } else {
        metadata_spec.get_absolute_offset() + ((data_addr >> log_bytes_in_region) << (-shift))
    }
}
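// A hedged worked example of the translation above (illustrative values, not taken from any
// real spec): with `log_num_of_bits = 0` (1 bit of metadata per region) and
// `log_bytes_in_region = 3` (8-byte regions), `shift = LOG_BITS_IN_BYTE - 0 = 3`, so one
// metadata byte covers 8 regions * 8 bytes = 64 bytes of data, and
//     data_addr = 0x200  =>  offset + ((0x200 >> 3) >> 3) = offset + 0x8.
// With `log_num_of_bits = 4` (16-bit metadata), `shift = 3 - 4 = -1`, so the per-region index
// is instead shifted left by 1: each region maps to 2 metadata bytes.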

/// Performs reverse address translation from contiguous metadata bits to data addresses.
/// The input address and bit shift should be aligned.
///
/// Arguments:
/// * `metadata_spec`: The side metadata spec. It should be contiguous side metadata.
/// * `metadata_addr`: The metadata address. Returned by [`address_to_contiguous_meta_address`].
/// * `bit`: The bit shift for the metadata. Returned by [`meta_byte_lshift`].
pub(super) fn contiguous_meta_address_to_address(
    metadata_spec: &SideMetadataSpec,
    metadata_addr: Address,
    bit: u8,
) -> Address {
    debug_assert_eq!(
        align_metadata_address(metadata_spec, metadata_addr, bit),
        (metadata_addr, bit)
    );
    let shift = (LOG_BITS_IN_BYTE as i32) - metadata_spec.log_num_of_bits as i32;
    let relative_meta_addr = metadata_addr - metadata_spec.get_absolute_offset();

    let data_addr_intermediate = if shift >= 0 {
        relative_meta_addr << shift
    } else {
        relative_meta_addr >> (-shift)
    };
    let data_addr_bit_shift = if shift >= 0 {
        metadata_spec.log_bytes_in_region - metadata_spec.log_num_of_bits
    } else {
        metadata_spec.log_bytes_in_region
    };

    let data_addr = (data_addr_intermediate << metadata_spec.log_bytes_in_region)
        + ((bit as usize) << data_addr_bit_shift);

    unsafe { Address::from_usize(data_addr) }
}
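// A hedged sketch of the reverse direction, continuing the illustrative 1-bit-per-8-byte-region
// example above: `shift = 3`, so a relative metadata address of 0x8 expands to `0x8 << 3 = 0x40`
// regions, i.e. a data address of `0x40 << 3 = 0x200` from the start of the data range; the
// `bit` argument then selects one of the 8 regions covered by that metadata byte, each region
// contributing `1 << (3 - 0) = 8` bytes to the result.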

/// Align a pair of a metadata address and a metadata bit offset to the start of this metadata value.
/// For example, when the metadata is 4 bits, it should only start at bit 0 or bit 4.
/// When the metadata is 16 bits, it should only start at bit 0, and its metadata address should be aligned to 2 bytes.
/// This is important, as [`contiguous_meta_address_to_address`] can only convert the start address of metadata to
/// the data address.
pub(super) fn align_metadata_address(
    spec: &SideMetadataSpec,
    metadata_addr: Address,
    bit: u8,
) -> (Address, u8) {
    if spec.log_num_of_bits >= LOG_BITS_IN_BYTE as usize {
        (
            metadata_addr.align_down(1 << (spec.log_num_of_bits - LOG_BITS_IN_BYTE as usize)),
            0,
        )
    } else {
        (
            metadata_addr,
            crate::util::conversions::raw_align_down(
                bit as usize,
                (1 << spec.log_num_of_bits) as usize,
            ) as u8,
        )
    }
}

/// Unmaps the specified metadata range, or panics.
#[cfg(test)]
pub(crate) fn ensure_munmap_metadata(start: Address, size: usize) {
    use crate::util::memory;
    trace!("ensure_munmap_metadata({}, 0x{:x})", start, size);

    assert!(memory::munmap(start, size).is_ok())
}

/// Unmaps a metadata space (`spec`) for the specified data address range (`start` and `size`).
/// Returns the size in bytes that gets munmapped.
#[cfg(test)]
pub(crate) fn ensure_munmap_contiguous_metadata_space(
    start: Address,
    size: usize,
    spec: &SideMetadataSpec,
) -> usize {
    // nearest page-aligned starting address
    let metadata_start = address_to_meta_address(spec, start);
    let mmap_start = metadata_start.align_down(BYTES_IN_PAGE);
    // nearest page-aligned ending address
    let metadata_size = data_to_meta_size_round_up(spec, size);
    let mmap_size = (metadata_start + metadata_size).align_up(BYTES_IN_PAGE) - mmap_start;
    if mmap_size > 0 {
        ensure_munmap_metadata(mmap_start, mmap_size);
    }
    mmap_size
}

/// Tries to mmap the metadata space (`spec`) for the specified data address range (`start` and `size`).
/// Setting `no_reserve` to true means the function will only map the address range, without reserving swap space or physical memory.
/// Returns the size in bytes that gets mmapped if the function succeeds.
pub(super) fn try_mmap_contiguous_metadata_space(
    start: Address,
    size: usize,
    spec: &SideMetadataSpec,
    no_reserve: bool,
    anno: &MmapAnnotation,
) -> Result<usize> {
    debug_assert!(start.is_aligned_to(BYTES_IN_PAGE));
    debug_assert!(size % BYTES_IN_PAGE == 0);

    // nearest page-aligned starting address
    let metadata_start = address_to_meta_address(spec, start);
    let mmap_start = metadata_start.align_down(BYTES_IN_PAGE);
    // nearest page-aligned ending address
    let metadata_size = data_to_meta_size_round_up(spec, size);
    let mmap_size = (metadata_start + metadata_size).align_up(BYTES_IN_PAGE) - mmap_start;
    if mmap_size > 0 {
        if !no_reserve {
            MMAPPER.ensure_mapped(
                mmap_start,
                mmap_size >> LOG_BYTES_IN_PAGE,
                MmapStrategy::SIDE_METADATA,
                anno,
            )
        } else {
            MMAPPER.quarantine_address_range(
                mmap_start,
                mmap_size >> LOG_BYTES_IN_PAGE,
                MmapStrategy::SIDE_METADATA,
                anno,
            )
        }
        .map(|_| mmap_size)
    } else {
        Ok(0)
    }
}
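// A hedged numeric sketch of the page rounding above (values assumed, not from a real spec):
// for a spec with 1 metadata bit per 4 KiB region, mapping 16 MiB of data needs only
// 16 MiB >> 15 = 512 bytes of metadata, but both ends are rounded out to page boundaries, so
// `mmap_size` is a single whole page (assuming 4 KiB pages) unless the metadata happens to
// straddle a page boundary.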

/// Performs the translation of a data address (`data_addr`) to a metadata address for the specified metadata (`metadata_spec`).
pub(crate) fn address_to_meta_address(
    metadata_spec: &SideMetadataSpec,
    data_addr: Address,
) -> Address {
    #[cfg(target_pointer_width = "32")]
    let res = {
        if metadata_spec.is_global {
            address_to_contiguous_meta_address(metadata_spec, data_addr)
        } else {
            address_to_chunked_meta_address(metadata_spec, data_addr)
        }
    };
    #[cfg(target_pointer_width = "64")]
    let res = { address_to_contiguous_meta_address(metadata_spec, data_addr) };

    trace!(
        "address_to_meta_address({:?}, addr: {}) -> 0x{:x}",
        metadata_spec,
        data_addr,
        res
    );

    res
}

/// Return the base-2 logarithm of the ratio of data bits and metadata bits per region.
///
/// Suppose a memory region has `data_bits` bits of data, and `meta_bits` bits of metadata for
/// `metadata_spec`, and the result of `log_data_meta_ratio(metadata_spec)` is `shift`, then
///
/// -   `data_bits >> shift == meta_bits`
/// -   `meta_bits << shift == data_bits`
pub(super) const fn log_data_meta_ratio(metadata_spec: &SideMetadataSpec) -> usize {
    let log_data_bits_in_region = (LOG_BITS_IN_BYTE as usize) + metadata_spec.log_bytes_in_region;
    let log_meta_bits_in_region = metadata_spec.log_num_of_bits;

    // TODO: In theory, it is possible to construct a side metadata that has more metadata bits than
    // data bits per region.  But such pathological side metadata consumes way too much memory, and
    // should never be used in any useful applications.  It should be forbidden.
    log_data_bits_in_region - log_meta_bits_in_region
}
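// A hedged worked example (illustrative spec, not a real one): with `log_bytes_in_region = 12`
// (4 KiB regions) and `log_num_of_bits = 0` (1 bit per region), the ratio is (3 + 12) - 0 = 15,
// i.e. every 2^15 = 32768 bits (4 KiB) of data are described by a single metadata bit, and
// `data_to_meta_size_round_up` below divides a data size by 2^15, rounding up.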

/// Calculate the amount of metadata needed for the given amount of data memory, rounded up to the
/// nearest integer. `data_size` can be in any unit, e.g. bits, bytes, pages, blocks, chunks, etc.,
/// and the result has the same unit.
pub(super) const fn data_to_meta_size_round_up(
    metadata_spec: &SideMetadataSpec,
    data_size: usize,
) -> usize {
    rshift_align_up(data_size, log_data_meta_ratio(metadata_spec))
}

/// Calculate the amount of data governed by the given amount of metadata.  `meta_size` can be in any
/// unit, e.g. bits, bytes, pages, blocks, chunks, etc., and the result has the same unit.
pub(super) const fn meta_to_data_size(metadata_spec: &SideMetadataSpec, meta_size: usize) -> usize {
    meta_size << log_data_meta_ratio(metadata_spec)
}

#[allow(dead_code)]
pub(super) const fn metadata_address_range_size(metadata_spec: &SideMetadataSpec) -> usize {
    1usize << (VMLayout::LOG_ARCH_ADDRESS_SPACE - log_data_meta_ratio(metadata_spec))
}

pub(super) fn meta_byte_lshift(metadata_spec: &SideMetadataSpec, data_addr: Address) -> u8 {
    let bits_num_log = metadata_spec.log_num_of_bits as i32;
    if bits_num_log >= 3 {
        return 0;
    }
    let rem_shift = BITS_IN_WORD as i32 - ((LOG_BITS_IN_BYTE as i32) - bits_num_log);
    ((((data_addr >> metadata_spec.log_bytes_in_region) << rem_shift) >> rem_shift) << bits_num_log)
        as u8
}

pub(super) fn meta_byte_mask(metadata_spec: &SideMetadataSpec) -> u8 {
    let bits_num_log = metadata_spec.log_num_of_bits;
    ((1usize << (1usize << bits_num_log)) - 1) as u8
}
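// A hedged example of how the two helpers above combine (assumed values): for a spec with
// `log_num_of_bits = 1` (2-bit metadata) and `log_bytes_in_region = 3`, `meta_byte_mask` is
// (1 << 2) - 1 = 0b11, and for a data address whose region index is 5, `meta_byte_lshift` keeps
// the low 2 bits of the index (5 % 4 = 1) and scales by the metadata width, giving a shift of
// 1 << 1 = 2; a caller can then typically extract the value as `(byte >> lshift) & mask`.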

/// The result type for find meta bits functions.
pub enum FindMetaBitResult {
    Found { addr: Address, bit: u8 },
    NotFound,
    UnmappedMetadata,
}

// Check and find the last bit that is set. We try to load whole words where possible, and fall back to loading bytes.
pub fn find_last_non_zero_bit_in_metadata_bytes(
    meta_start: Address,
    meta_end: Address,
) -> FindMetaBitResult {
    use crate::util::constants::BYTES_IN_ADDRESS;

    let mmap_granularity = MMAPPER.granularity();

    let mut cur = meta_end;
    // We need to check whether the metadata address is mapped or not.  But we make use of the
    // granularity of the `Mmapper` to reduce the number of checks.  This records the start of a
    // grain that is tested to be mapped.
    let mut mapped_grain = Address::MAX;
    while cur > meta_start {
        // If we can check a whole word, set the step to the word size. Otherwise, the step is 1 and we check a single byte.
        let step = if cur.is_aligned_to(BYTES_IN_ADDRESS) && cur - BYTES_IN_ADDRESS >= meta_start {
            BYTES_IN_ADDRESS
        } else {
            1
        };
        // Move to the address so we can load from it
        cur -= step;
        // The value we check has to be in the range.
        debug_assert!(
            cur >= meta_start && cur < meta_end,
            "Check metadata value at meta address {}, which is not in the range of [{}, {})",
            cur,
            meta_start,
            meta_end
        );

        // If we are looking at an address below the grain we last found to be mapped, we need to check whether its grain is mapped.
        if cur < mapped_grain {
            if cur.is_mapped() {
                // This is mapped. No need to check again within this grain.
                mapped_grain = cur.align_down(mmap_granularity);
            } else {
                return FindMetaBitResult::UnmappedMetadata;
            }
        }

        if step == BYTES_IN_ADDRESS {
            // Load and check a usize word
            let value = unsafe { cur.load::<usize>() };
            if value != 0 {
                let bit = find_last_non_zero_bit::<usize>(value, 0, usize::BITS as u8).unwrap();
                let byte_offset = bit >> LOG_BITS_IN_BYTE;
                let bit_offset = bit - ((byte_offset) << LOG_BITS_IN_BYTE);
                return FindMetaBitResult::Found {
                    addr: cur + byte_offset as usize,
                    bit: bit_offset,
                };
            }
        } else {
            // Load and check a byte
            let value = unsafe { cur.load::<u8>() };
            if let Some(bit) = find_last_non_zero_bit::<u8>(value, 0, 8) {
                return FindMetaBitResult::Found { addr: cur, bit };
            }
        }
    }
    FindMetaBitResult::NotFound
}

// Check and find the last non-zero bit in the same byte.
pub fn find_last_non_zero_bit_in_metadata_bits(
    addr: Address,
    start_bit: u8,
    end_bit: u8,
) -> FindMetaBitResult {
    if !addr.is_mapped() {
        return FindMetaBitResult::UnmappedMetadata;
    }
    let byte = unsafe { addr.load::<u8>() };
    if let Some(bit) = find_last_non_zero_bit::<u8>(byte, start_bit, end_bit) {
        return FindMetaBitResult::Found { addr, bit };
    }
    FindMetaBitResult::NotFound
}

use num_traits::{CheckedShl, PrimInt};
fn find_last_non_zero_bit<T>(value: T, start: u8, end: u8) -> Option<u8>
where
    T: PrimInt + CheckedShl,
{
    let mask = match T::one().checked_shl((end - start) as u32) {
        Some(shl) => (shl - T::one()) << (start as u32),
        None => T::max_value() << (start as u32),
    };
    let masked = value & mask;
    if masked.is_zero() {
        None
    } else {
        let leading_zeroes = masked.leading_zeros();
        let total_bits = std::mem::size_of::<T>() * u8::BITS as usize;
        Some(total_bits as u8 - leading_zeroes as u8 - 1)
    }
}
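// A hedged walk-through of the mask computation above (assumed values): for a `u8` with
// `start = 0` and `end = 8`, `checked_shl(8)` overflows and returns `None`, so the mask falls
// back to `u8::MAX << 0 = 0xff`; for `start = 2` and `end = 5`, the mask is
// ((1 << 3) - 1) << 2 = 0b0001_1100, and the index of the highest set bit inside that window is
// then recovered from `leading_zeros`.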

pub fn scan_non_zero_bits_in_metadata_bytes(
    meta_start: Address,
    meta_end: Address,
    visit_bit: &mut impl FnMut(Address, BitOffset),
) {
    use crate::util::constants::BYTES_IN_ADDRESS;

    let mut cursor = meta_start;
    while cursor < meta_end && !cursor.is_aligned_to(BYTES_IN_ADDRESS) {
        let byte = unsafe { cursor.load::<u8>() };
        scan_non_zero_bits_in_metadata_word(cursor, byte as usize, visit_bit);
        cursor += 1usize;
    }

    while cursor + BYTES_IN_ADDRESS < meta_end {
        let word = unsafe { cursor.load::<usize>() };
        scan_non_zero_bits_in_metadata_word(cursor, word, visit_bit);
        cursor += BYTES_IN_ADDRESS;
    }

    while cursor < meta_end {
        let byte = unsafe { cursor.load::<u8>() };
        scan_non_zero_bits_in_metadata_word(cursor, byte as usize, visit_bit);
        cursor += 1usize;
    }
}

fn scan_non_zero_bits_in_metadata_word(
    meta_addr: Address,
    mut word: usize,
    visit_bit: &mut impl FnMut(Address, BitOffset),
) {
    while word != 0 {
        let bit = word.trailing_zeros();
        visit_bit(meta_addr, bit as u8);
        word = word & (word - 1);
    }
}
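// A hedged trace of the loop above for `word = 0b10110` (an assumed value): `trailing_zeros` is
// 1, so bit 1 is visited and `word & (word - 1)` clears it, leaving 0b10100; the next iterations
// visit bits 2 and 4, and the loop stops once the word becomes zero. `word & (word - 1)` always
// clears exactly the lowest set bit.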

pub fn scan_non_zero_bits_in_metadata_bits(
    meta_addr: Address,
    bit_start: BitOffset,
    bit_end: BitOffset,
    visit_bit: &mut impl FnMut(Address, BitOffset),
) {
    let byte = unsafe { meta_addr.load::<u8>() };
    for bit in bit_start..bit_end {
        if byte & (1 << bit) != 0 {
            visit_bit(meta_addr, bit);
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::util::metadata::side_metadata::*;

    fn test_round_trip_conversion(spec: &SideMetadataSpec, test_data: &[Address]) {
        for ref_addr in test_data {
            let addr = *ref_addr;

            // This is an aligned address. When we do roundtrip conversion, we will get back the original address.
            {
                assert!(addr.is_aligned_to(1 << spec.log_bytes_in_region));
                let meta_addr = address_to_contiguous_meta_address(spec, addr);
                let shift = meta_byte_lshift(spec, addr);
                assert_eq!(
                    contiguous_meta_address_to_address(spec, meta_addr, shift),
                    addr
                );
            }

            // This is an unaligned address. When we do roundtrip conversion, we will get the aligned address.
            {
                let next_addr = addr + 1usize;
                let meta_addr = address_to_contiguous_meta_address(spec, next_addr);
                let shift = meta_byte_lshift(spec, next_addr);
                assert_eq!(
                    contiguous_meta_address_to_address(spec, meta_addr, shift),
                    addr
                ); // we get back addr (which is the aligned address)
            }
        }
    }

    const TEST_ADDRESS_8B_REGION: [Address; 8] = [
        unsafe { Address::from_usize(0x8000_0000) },
        unsafe { Address::from_usize(0x8000_0008) },
        unsafe { Address::from_usize(0x8000_0010) },
        unsafe { Address::from_usize(0x8000_0018) },
        unsafe { Address::from_usize(0x8000_0020) },
        unsafe { Address::from_usize(0x8001_0000) },
        unsafe { Address::from_usize(0x8001_0008) },
        unsafe { Address::from_usize(0xd000_0000) },
    ];

    #[test]
    fn test_contiguous_metadata_conversion_0_3() {
        let spec = SideMetadataSpec {
            name: "ContiguousMetadataTestSpec",
            is_global: true,
            offset: SideMetadataOffset::addr(GLOBAL_SIDE_METADATA_BASE_ADDRESS),
            log_num_of_bits: 0,
            log_bytes_in_region: 3,
        };

        test_round_trip_conversion(&spec, &TEST_ADDRESS_8B_REGION);
    }

    #[test]
    fn test_contiguous_metadata_conversion_1_3() {
        let spec = SideMetadataSpec {
            name: "ContiguousMetadataTestSpec",
            is_global: true,
            offset: SideMetadataOffset::addr(GLOBAL_SIDE_METADATA_BASE_ADDRESS),
            log_num_of_bits: 1,
            log_bytes_in_region: 3,
        };

        test_round_trip_conversion(&spec, &TEST_ADDRESS_8B_REGION);
    }

    #[test]
    fn test_contiguous_metadata_conversion_4_3() {
        let spec = SideMetadataSpec {
            name: "ContiguousMetadataTestSpec",
            is_global: true,
            offset: SideMetadataOffset::addr(GLOBAL_SIDE_METADATA_BASE_ADDRESS),
            log_num_of_bits: 4,
            log_bytes_in_region: 3,
        };

        test_round_trip_conversion(&spec, &TEST_ADDRESS_8B_REGION);
    }

    #[test]
    fn test_contiguous_metadata_conversion_5_3() {
        let spec = SideMetadataSpec {
            name: "ContiguousMetadataTestSpec",
            is_global: true,
            offset: SideMetadataOffset::addr(GLOBAL_SIDE_METADATA_BASE_ADDRESS),
            log_num_of_bits: 5,
            log_bytes_in_region: 3,
        };

        test_round_trip_conversion(&spec, &TEST_ADDRESS_8B_REGION);
    }

    const TEST_ADDRESS_4KB_REGION: [Address; 8] = [
        unsafe { Address::from_usize(0x8000_0000) },
        unsafe { Address::from_usize(0x8000_1000) },
        unsafe { Address::from_usize(0x8000_2000) },
        unsafe { Address::from_usize(0x8000_3000) },
        unsafe { Address::from_usize(0x8000_4000) },
        unsafe { Address::from_usize(0x8001_0000) },
        unsafe { Address::from_usize(0x8001_1000) },
        unsafe { Address::from_usize(0xd000_0000) },
    ];

    #[test]
    fn test_contiguous_metadata_conversion_0_12() {
        let spec = SideMetadataSpec {
            name: "ContiguousMetadataTestSpec",
            is_global: true,
            offset: SideMetadataOffset::addr(GLOBAL_SIDE_METADATA_BASE_ADDRESS),
            log_num_of_bits: 0,
            log_bytes_in_region: 12, // 4K
        };

        test_round_trip_conversion(&spec, &TEST_ADDRESS_4KB_REGION);
    }

    #[test]
    fn test_find_last_non_zero_bit_in_u8() {
        use super::find_last_non_zero_bit;
        let bit = find_last_non_zero_bit::<u8>(0b100101, 0, 1);
        assert_eq!(bit, Some(0));

        let bit = find_last_non_zero_bit::<u8>(0b100101, 0, 3);
        assert_eq!(bit, Some(2));

        let bit = find_last_non_zero_bit::<u8>(0b100101, 0, 8);
        assert_eq!(bit, Some(5));

        let bit = find_last_non_zero_bit::<u8>(0b0, 0, 1);
        assert_eq!(bit, None);
    }

    #[test]
    fn test_align_metadata_address() {
        let create_spec = |log_num_of_bits: usize| SideMetadataSpec {
            name: "AlignMetadataBitTestSpec",
            is_global: true,
            offset: SideMetadataOffset::addr(GLOBAL_SIDE_METADATA_BASE_ADDRESS),
            log_num_of_bits,
            log_bytes_in_region: 3,
        };

        const ADDR_1000: Address = unsafe { Address::from_usize(0x1000) };
        const ADDR_1001: Address = unsafe { Address::from_usize(0x1001) };
        const ADDR_1002: Address = unsafe { Address::from_usize(0x1002) };
        const ADDR_1003: Address = unsafe { Address::from_usize(0x1003) };
        const ADDR_1004: Address = unsafe { Address::from_usize(0x1004) };
        const ADDR_1005: Address = unsafe { Address::from_usize(0x1005) };
        const ADDR_1006: Address = unsafe { Address::from_usize(0x1006) };
        const ADDR_1007: Address = unsafe { Address::from_usize(0x1007) };
        const ADDR_1008: Address = unsafe { Address::from_usize(0x1008) };
        const ADDR_1009: Address = unsafe { Address::from_usize(0x1009) };

        let metadata_2bits = create_spec(1);
        assert_eq!(
            align_metadata_address(&metadata_2bits, ADDR_1000, 0),
            (ADDR_1000, 0)
        );
        assert_eq!(
            align_metadata_address(&metadata_2bits, ADDR_1000, 1),
            (ADDR_1000, 0)
        );
        assert_eq!(
            align_metadata_address(&metadata_2bits, ADDR_1000, 2),
            (ADDR_1000, 2)
        );
        assert_eq!(
            align_metadata_address(&metadata_2bits, ADDR_1000, 3),
            (ADDR_1000, 2)
        );
        assert_eq!(
            align_metadata_address(&metadata_2bits, ADDR_1000, 4),
            (ADDR_1000, 4)
        );
        assert_eq!(
            align_metadata_address(&metadata_2bits, ADDR_1000, 5),
            (ADDR_1000, 4)
        );
        assert_eq!(
            align_metadata_address(&metadata_2bits, ADDR_1000, 6),
            (ADDR_1000, 6)
        );
        assert_eq!(
            align_metadata_address(&metadata_2bits, ADDR_1000, 7),
            (ADDR_1000, 6)
        );

        let metadata_4bits = create_spec(2);
        assert_eq!(
            align_metadata_address(&metadata_4bits, ADDR_1000, 0),
            (ADDR_1000, 0)
        );
        assert_eq!(
            align_metadata_address(&metadata_4bits, ADDR_1000, 1),
            (ADDR_1000, 0)
        );
        assert_eq!(
            align_metadata_address(&metadata_4bits, ADDR_1000, 2),
            (ADDR_1000, 0)
        );
        assert_eq!(
            align_metadata_address(&metadata_4bits, ADDR_1000, 3),
            (ADDR_1000, 0)
        );
        assert_eq!(
            align_metadata_address(&metadata_4bits, ADDR_1000, 4),
            (ADDR_1000, 4)
        );
        assert_eq!(
            align_metadata_address(&metadata_4bits, ADDR_1000, 5),
            (ADDR_1000, 4)
        );
        assert_eq!(
            align_metadata_address(&metadata_4bits, ADDR_1000, 6),
            (ADDR_1000, 4)
        );
        assert_eq!(
            align_metadata_address(&metadata_4bits, ADDR_1000, 7),
            (ADDR_1000, 4)
        );

        let metadata_8bits = create_spec(3);
        assert_eq!(
            align_metadata_address(&metadata_8bits, ADDR_1000, 0),
            (ADDR_1000, 0)
        );
        assert_eq!(
            align_metadata_address(&metadata_8bits, ADDR_1000, 1),
            (ADDR_1000, 0)
        );
        assert_eq!(
            align_metadata_address(&metadata_8bits, ADDR_1000, 2),
            (ADDR_1000, 0)
        );
        assert_eq!(
            align_metadata_address(&metadata_8bits, ADDR_1000, 3),
            (ADDR_1000, 0)
        );
        assert_eq!(
            align_metadata_address(&metadata_8bits, ADDR_1000, 4),
            (ADDR_1000, 0)
        );
        assert_eq!(
            align_metadata_address(&metadata_8bits, ADDR_1000, 5),
            (ADDR_1000, 0)
        );
        assert_eq!(
            align_metadata_address(&metadata_8bits, ADDR_1000, 6),
            (ADDR_1000, 0)
        );
        assert_eq!(
            align_metadata_address(&metadata_8bits, ADDR_1000, 7),
            (ADDR_1000, 0)
        );

        let metadata_16bits = create_spec(4);
        assert_eq!(
            align_metadata_address(&metadata_16bits, ADDR_1000, 0),
            (ADDR_1000, 0)
        );
        assert_eq!(
            align_metadata_address(&metadata_16bits, ADDR_1000, 1),
            (ADDR_1000, 0)
        );
        assert_eq!(
            align_metadata_address(&metadata_16bits, ADDR_1000, 2),
            (ADDR_1000, 0)
        );
        assert_eq!(
            align_metadata_address(&metadata_16bits, ADDR_1000, 3),
            (ADDR_1000, 0)
        );
        assert_eq!(
            align_metadata_address(&metadata_16bits, ADDR_1000, 4),
            (ADDR_1000, 0)
        );
        assert_eq!(
            align_metadata_address(&metadata_16bits, ADDR_1000, 5),
            (ADDR_1000, 0)
        );
        assert_eq!(
            align_metadata_address(&metadata_16bits, ADDR_1000, 6),
            (ADDR_1000, 0)
        );
        assert_eq!(
            align_metadata_address(&metadata_16bits, ADDR_1000, 7),
            (ADDR_1000, 0)
        );
        assert_eq!(
            align_metadata_address(&metadata_16bits, ADDR_1001, 0),
            (ADDR_1000, 0)
        );
        assert_eq!(
            align_metadata_address(&metadata_16bits, ADDR_1001, 1),
            (ADDR_1000, 0)
        );
        assert_eq!(
            align_metadata_address(&metadata_16bits, ADDR_1001, 2),
            (ADDR_1000, 0)
        );
        assert_eq!(
            align_metadata_address(&metadata_16bits, ADDR_1001, 3),
            (ADDR_1000, 0)
        );
        assert_eq!(
            align_metadata_address(&metadata_16bits, ADDR_1001, 4),
            (ADDR_1000, 0)
        );
        assert_eq!(
            align_metadata_address(&metadata_16bits, ADDR_1001, 5),
            (ADDR_1000, 0)
        );
        assert_eq!(
            align_metadata_address(&metadata_16bits, ADDR_1001, 6),
            (ADDR_1000, 0)
        );
        assert_eq!(
            align_metadata_address(&metadata_16bits, ADDR_1001, 7),
            (ADDR_1000, 0)
        );
    }
}