mmtk/policy/sft_map.rs

use super::sft::*;
use crate::util::metadata::side_metadata::SideMetadataSpec;
use crate::util::Address;

use std::sync::atomic::Ordering;

/// SFTMap manages the SFT table, and the mapping between addresses and indices in the table. The trait allows
/// us to have multiple implementations of the SFT table.
pub trait SFTMap {
    /// Check if the address has an SFT entry in the map (including an empty SFT entry). This is mostly a bounds check
    /// to make sure that we won't have an index-out-of-bounds error. For the sake of performance, the implementations
    /// of other methods in this trait (such as get_unchecked(), update() and clear()) do not do this check implicitly.
    /// Instead, they assume the address has a valid entry in the SFT. If an address could be arbitrary, the caller should use this
    /// method as a pre-check before calling those methods in the trait. We also provide a method `get_checked()` which includes
    /// this check, and will return an empty SFT if the address is out of bounds.
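    ///
    /// # Example (sketch)
    ///
    /// A minimal sketch of the intended pattern, assuming `sft_map` is a map returned by
    /// `create_sft_map()` and `addr` may be an arbitrary address:
    ///
    /// ```ignore
    /// if sft_map.has_sft_entry(addr) {
    ///     // Safe: we just checked that the address has an entry.
    ///     let sft = unsafe { sft_map.get_unchecked(addr) };
    ///     // ... use sft ...
    /// } else {
    ///     // Out of bounds: get_checked() would return the empty SFT here.
    /// }
    /// ```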
    fn has_sft_entry(&self, addr: Address) -> bool;

    /// Get the side metadata spec this SFT map uses.
    fn get_side_metadata(&self) -> Option<&SideMetadataSpec>;

    /// Get SFT for the address. The address must have a valid SFT entry in the table (e.g. from an object reference, or from an address
    /// that is known to be in our spaces). Otherwise, use `get_checked()`.
    ///
    /// # Safety
    /// The address must have a valid SFT entry in the map. Usually we know this if the address is from an object reference, or from our space address range.
    /// Otherwise, the caller should check with `has_sft_entry()` before calling this method, or use `get_checked()`.
    unsafe fn get_unchecked(&self, address: Address) -> &dyn SFT;

    /// Get SFT for the address. The address can be arbitrary. For out-of-bounds access, an empty SFT will be returned.
    /// We only provide the checked version for `get()`, as it may be used to query arbitrary objects and addresses. Other methods like `update/clear/etc` are
    /// mostly used inside MMTk, and in most cases, we know the addresses they work on are within our space address range.
    fn get_checked(&self, address: Address) -> &dyn SFT;

    /// Set SFT for the address range. The address must have a valid SFT entry in the table.
    ///
    /// # Safety
    /// The address must have a valid SFT entry in the map. Usually we know this if the address is from an object reference, or from our space address range.
    /// Otherwise, the caller should check with `has_sft_entry()` before calling this method.
    unsafe fn update(&self, space: SFTRawPointer, start: Address, bytes: usize);

    /// Notify the SFT map of space creation. `DenseChunkMap` needs to create an entry for the space.
    fn notify_space_creation(&mut self, _space: SFTRawPointer) {}

    /// Eagerly initialize the SFT table. For most implementations, it could be the same as update().
    /// However, we need this as a separate method for SFTDenseChunkMap, as it needs to map side metadata first
    /// before setting the table.
    ///
    /// # Safety
    /// The address must have a valid SFT entry in the map. Usually we know this if the address is from an object reference, or from our space address range.
    /// Otherwise, the caller should check with `has_sft_entry()` before calling this method.
    unsafe fn eager_initialize(
        &mut self,
        space: *const (dyn SFT + Sync + 'static),
        start: Address,
        bytes: usize,
    ) {
        self.update(space, start, bytes);
    }

    /// Clear SFT for the address. The address must have a valid SFT entry in the table.
    ///
    /// # Safety
    /// The address must have a valid SFT entry in the map. Usually we know this if the address is from an object reference, or from our space address range.
    /// Otherwise, the caller should check with `has_sft_entry()` before calling this method.
    unsafe fn clear(&self, address: Address);
}

pub(crate) fn create_sft_map() -> Box<dyn SFTMap> {
    cfg_if::cfg_if! {
        if #[cfg(target_pointer_width = "64")] {
            // For 64 bits, we generally want to use the space map, which requires contiguous spaces and no off-heap memory.
            // If the requirements are not met, we have to choose a different SFT map implementation.
            use crate::util::heap::layout::vm_layout::vm_layout;
            if !vm_layout().force_use_contiguous_spaces {
                // This is usually the case for compressed pointers. Use the 32-bit implementation.
                Box::new(sparse_chunk_map::SFTSparseChunkMap::new())
            } else if cfg!(any(feature = "malloc_mark_sweep", feature = "vm_space")) {
                // We have off-heap memory (malloc'd objects, or VM space). We have to use a chunk-based map.
                Box::new(dense_chunk_map::SFTDenseChunkMap::new())
            } else {
                // We can use the space map.
                Box::new(space_map::SFTSpaceMap::new())
            }
        } else if #[cfg(target_pointer_width = "32")] {
            // Use the sparse chunk map. As we have a limited virtual address range on 32 bits,
            // it is okay to have a sparse chunk map which maps every chunk to an index in the array.
            Box::new(sparse_chunk_map::SFTSparseChunkMap::new())
        } else {
            compile_error!("Cannot figure out which SFT map to use.");
        }
    }
}

/// The raw pointer for SFT. We expect a space to provide this to the SFT map.
pub(crate) type SFTRawPointer = *const (dyn SFT + Sync + 'static);

/// We store the raw pointer as a double word using atomics.
/// We use portable_atomic. It provides non-locking atomic operations where possible,
/// and uses a locking operation as the fallback.
/// Rust only provides AtomicU128 for some platforms, and does not provide the type
/// on x86_64-linux, as some earlier x86_64 CPUs do not have 128-bit atomic instructions.
/// The crate portable_atomic works around the problem with runtime detection to
/// see if 128-bit atomic instructions are available.
#[cfg(target_pointer_width = "64")]
type AtomicDoubleWord = portable_atomic::AtomicU128;
#[cfg(target_pointer_width = "64")]
type DoubleWord = u128;
#[cfg(target_pointer_width = "32")]
type AtomicDoubleWord = portable_atomic::AtomicU64;
#[cfg(target_pointer_width = "32")]
type DoubleWord = u64;

/// The type we store the SFT raw pointer as. It is basically just a double-word-sized atomic integer.
/// This type provides an abstraction so we can access the SFT easily.
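///
/// # Example (sketch)
///
/// A minimal sketch, assuming `space_ptr: SFTRawPointer` points to a live space:
///
/// ```ignore
/// let storage = SFTRefStorage::new(space_ptr);
/// let sft: &dyn SFT = storage.load(); // atomic acquire load of the fat pointer
/// storage.store(space_ptr);           // atomic release store of a raw SFT pointer
/// ```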
#[repr(transparent)]
pub(crate) struct SFTRefStorage(AtomicDoubleWord);

impl SFTRefStorage {
    /// A check at boot time to ensure `SFTRefStorage` is correct.
    pub fn pre_use_check() {
        // If we do not have lock-free operations, warn the users.
        if !AtomicDoubleWord::is_lock_free() {
            warn!(
                "SFT access word is not lock free on this platform. This will slow down SFT map."
            );
        }
        // Our storage type needs to be the same width as the dyn pointer type.
        assert_eq!(
            std::mem::size_of::<AtomicDoubleWord>(),
            std::mem::size_of::<SFTRawPointer>()
        );
    }

    pub fn new(sft: SFTRawPointer) -> Self {
        let val: DoubleWord = unsafe { std::mem::transmute(sft) };
        Self(AtomicDoubleWord::new(val))
    }

    // Load with the acquire ordering.
    pub fn load(&self) -> &dyn SFT {
        let val = self.0.load(Ordering::Acquire);
        // Provenance-related APIs were stabilized in Rust 1.84.
        // Rust 1.91 introduced the warn-by-default lint `integer_to_ptr_transmutes`.
        // However, the pointer provenance APIs only work for pointer-sized integers, and
        // here we are transmuting from a double-word-sized integer to a fat pointer.
        // We still need to use transmute here.
        #[allow(unknown_lints)]
        #[allow(integer_to_ptr_transmutes)]
        unsafe {
            std::mem::transmute(val)
        }
    }

    // Store a raw SFT pointer with the release ordering.
    pub fn store(&self, sft: SFTRawPointer) {
        let val: DoubleWord = unsafe { std::mem::transmute(sft) };
        self.0.store(val, Ordering::Release)
    }
}

impl std::default::Default for SFTRefStorage {
    fn default() -> Self {
        Self::new(&EMPTY_SPACE_SFT as SFTRawPointer)
    }
}

#[allow(dead_code)]
#[cfg(target_pointer_width = "64")] // This impl only works for 64 bits: 1. the mask is designed for our 64-bit heap range, 2. on 64 bits, all our spaces are contiguous.
mod space_map {
    use super::*;
    use crate::util::heap::layout::vm_layout::vm_layout;

    /// The space map is a small table, with one entry for each MMTk space.
    pub struct SFTSpaceMap {
        sft: Vec<SFTRefStorage>,
        space_address_start: Address,
        space_address_end: Address,
    }

    unsafe impl Sync for SFTSpaceMap {}

    impl SFTMap for SFTSpaceMap {
        fn has_sft_entry(&self, addr: Address) -> bool {
            // An arbitrary address from Address::ZERO to Address::MAX will be cyclically mapped to an index between 0 and 31.
            // Only addresses within the virtual address range we use have valid entries.
            addr >= self.space_address_start && addr < self.space_address_end
        }

        fn get_side_metadata(&self) -> Option<&SideMetadataSpec> {
            None
        }

        fn get_checked(&self, address: Address) -> &dyn SFT {
            if self.has_sft_entry(address) {
                // We should be able to map the entire address range to indices in the table.
                debug_assert!(Self::addr_to_index(address) < self.sft.len());
                unsafe { self.get_unchecked(address) }
            } else {
                &EMPTY_SPACE_SFT
            }
        }

        unsafe fn get_unchecked(&self, address: Address) -> &dyn SFT {
            let cell = unsafe { self.sft.get_unchecked(Self::addr_to_index(address)) };
            cell.load()
        }

        unsafe fn update(
            &self,
            space: *const (dyn SFT + Sync + 'static),
            start: Address,
            bytes: usize,
        ) {
            let index = Self::addr_to_index(start);
            if cfg!(debug_assertions) {
                // Make sure we only update from empty to a valid space, or overwrite a space with itself.
                let old = self.sft[index].load();
                assert!((*old).name() == EMPTY_SFT_NAME || (*old).name() == (*space).name());
                // Make sure the range is in the space.
                let space_start = Self::index_to_space_start(index);
                assert!(start >= space_start);
                assert!(
                    start + bytes <= space_start + vm_layout().max_space_extent(),
                    "The range of {} + {} bytes does not fall into the space range {} and {}, \
                    and it is probably outside the address range we use.",
                    start,
                    bytes,
                    space_start,
                    space_start + vm_layout().max_space_extent()
                );
            }

            self.sft.get_unchecked(index).store(space);
        }

        unsafe fn clear(&self, addr: Address) {
            let index = Self::addr_to_index(addr);
            self.sft.get_unchecked(index).store(&EMPTY_SPACE_SFT as _);
        }
    }

    impl SFTSpaceMap {
        /// Create a new space map.
        #[allow(clippy::assertions_on_constants)] // We assert to make sure the constants are correct.
        pub fn new() -> Self {
            use crate::util::heap::layout::heap_parameters::MAX_SPACES;
            let table_size = Self::addr_to_index(Address::MAX) + 1;
            debug_assert!(table_size >= MAX_SPACES);
            Self {
                sft: std::iter::repeat_with(SFTRefStorage::default)
                    .take(table_size)
                    .collect(),
                space_address_start: Self::index_to_space_range(1).0, // the start of the first space
                space_address_end: Self::index_to_space_range(MAX_SPACES - 1).1, // the end of the last space
            }
        }

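        /// Map an address to a table index: mask off the high bits with the layout's address
        /// mask, then divide by the space extent (a power of two), so all addresses within one
        /// space map to the same index.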
        fn addr_to_index(addr: Address) -> usize {
            addr.and(vm_layout().address_mask()) >> vm_layout().log_space_extent
        }

        fn index_to_space_start(i: usize) -> Address {
            let (start, _) = Self::index_to_space_range(i);
            start
        }

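        /// Return the [start, end) address range of the space at index `i`.
        /// Index 0 is below the first space and has no valid range.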
        fn index_to_space_range(i: usize) -> (Address, Address) {
            if i == 0 {
                panic!("Invalid index: there is no space for index 0")
            } else {
                let start = Address::ZERO.add(i << vm_layout().log_space_extent);
                let extent = 1 << vm_layout().log_space_extent;
                (start, start.add(extent))
            }
        }
    }

    #[cfg(test)]
    mod tests {
        use super::*;
        use crate::util::heap::layout::heap_parameters::MAX_SPACES;
        use crate::util::heap::layout::vm_layout::vm_layout;

        // If the test `test_address_arithmetic()` fails, it is possibly due to a change of our heap range, max space extent, or max number of spaces.
        // We need to update the code and the constants for the address arithmetic.
        #[test]
        fn test_address_arithmetic() {
            // Before the 1st space
            assert_eq!(SFTSpaceMap::addr_to_index(Address::ZERO), 0);
            assert_eq!(SFTSpaceMap::addr_to_index(vm_layout().heap_start - 1), 0);

            let assert_for_index = |i: usize| {
                let (start, end) = SFTSpaceMap::index_to_space_range(i);
                println!("Space: Index#{} = [{}, {})", i, start, end);
                assert_eq!(SFTSpaceMap::addr_to_index(start), i);
                assert_eq!(SFTSpaceMap::addr_to_index(end - 1), i);
            };

            // Index 1 to 16 (MAX_SPACES)
            for i in 1..=MAX_SPACES {
                assert_for_index(i);
            }

            // Assert the space end.
            let (_, last_space_end) = SFTSpaceMap::index_to_space_range(MAX_SPACES);
            println!("Space end = {}", last_space_end);
            println!("Heap  end = {}", vm_layout().heap_end);
            assert_eq!(last_space_end, vm_layout().heap_end);

            // After the last space
            assert_eq!(SFTSpaceMap::addr_to_index(last_space_end), 17);
            assert_eq!(SFTSpaceMap::addr_to_index(Address::MAX), 31);
        }
    }
}

#[allow(dead_code)]
mod dense_chunk_map {
    use super::*;
    use crate::util::conversions;
    use crate::util::heap::layout::vm_layout::BYTES_IN_CHUNK;
    use crate::util::metadata::side_metadata::spec_defs::SFT_DENSE_CHUNK_MAP_INDEX;
    use crate::util::metadata::side_metadata::*;
    use std::collections::HashMap;
    use std::sync::atomic::Ordering;

    /// SFTDenseChunkMap is a small table. It has one entry for each space in the table, and uses
    /// side metadata to record the index for each chunk. This works for both 32 bits and 64 bits.
    /// However, its performance is expected to be suboptimal compared to the sparse chunk map on
    /// 32 bits, and the space map on 64 bits, so usually we do not use this implementation. However,
    /// it provides some flexibility: we can set the SFT on a per-chunk basis on 64 bits with decent performance.
    /// For example, when we use library malloc for mark sweep, we have no control over where the
    /// library malloc may allocate, so we cannot use the space map, and using a sparse chunk map
    /// would be costly in terms of memory. In this case, the dense chunk map is a good solution.
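    ///
    /// Lookup is two-level: the per-chunk side metadata byte stores an index, and `sft[index]`
    /// holds the `&dyn SFT` of the owning space (index 0 is the empty space).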
    pub struct SFTDenseChunkMap {
        /// The dense table, one entry per space. We use side metadata to store the space index for each chunk.
        /// Index 0 is EMPTY_SPACE_SFT.
        sft: Vec<SFTRefStorage>,
        /// A map from space name (assuming names are unique) to the space's index. We use this to know whether we have
        /// pushed the &dyn SFT for a space, and to know its index.
        index_map: HashMap<String, usize>,
    }

    unsafe impl Sync for SFTDenseChunkMap {}

    impl SFTMap for SFTDenseChunkMap {
        fn has_sft_entry(&self, addr: Address) -> bool {
            if SFT_DENSE_CHUNK_MAP_INDEX.is_mapped(addr) {
                let index = Self::addr_to_index(addr);
                index < self.sft.len() as u8
            } else {
                // We haven't mapped side metadata for the chunk, so we do not have an SFT entry for the address.
                false
            }
        }

        fn get_side_metadata(&self) -> Option<&SideMetadataSpec> {
            Some(&crate::util::metadata::side_metadata::spec_defs::SFT_DENSE_CHUNK_MAP_INDEX)
        }

        fn get_checked(&self, address: Address) -> &dyn SFT {
            if self.has_sft_entry(address) {
                unsafe { self.get_unchecked(address) }
            } else {
                &EMPTY_SPACE_SFT
            }
        }

        unsafe fn get_unchecked(&self, address: Address) -> &dyn SFT {
            let cell = self
                .sft
                .get_unchecked(Self::addr_to_index(address) as usize);
            cell.load()
        }

        fn notify_space_creation(&mut self, space: SFTRawPointer) {
            // Insert the space into the SFT table, and the SFT map.

            let space_name = unsafe { &*space }.name().to_string();
            // We shouldn't have this space in our map yet. Otherwise, this method is called multiple times for the same space.
            assert!(!self.index_map.contains_key(&space_name));
            // Index for the space
            let index = self.sft.len();
            // Insert into the hashmap and the vec.
            self.sft.push(SFTRefStorage::new(space));
            self.index_map.insert(space_name, index);
        }

        unsafe fn eager_initialize(&mut self, space: SFTRawPointer, start: Address, bytes: usize) {
            let context = SideMetadataContext {
                global: vec![SFT_DENSE_CHUNK_MAP_INDEX],
                local: vec![],
            };
            context
                .try_map_metadata_space(start, bytes, "SFTDenseChunkMap")
                .unwrap_or_else(|e| {
                    panic!("failed to mmap metadata memory: {e}");
                });

            self.update(space, start, bytes);
        }

        unsafe fn update(
            &self,
            space: *const (dyn SFT + Sync + 'static),
            start: Address,
            bytes: usize,
        ) {
            let index: u8 = *self.index_map.get((*space).name()).unwrap() as u8;

            // Iterate through the chunks and record the space index in the side metadata.
            let first_chunk = conversions::chunk_align_down(start);
            let last_chunk = conversions::chunk_align_up(start + bytes);
            let mut chunk = first_chunk;
            debug!(
                "update {} (chunk {}) to {} (chunk {})",
                start,
                first_chunk,
                start + bytes,
                last_chunk
            );
            while chunk < last_chunk {
                trace!("Update {} to index {}", chunk, index);
                SFT_DENSE_CHUNK_MAP_INDEX.store_atomic::<u8>(chunk, index, Ordering::SeqCst);
                chunk += BYTES_IN_CHUNK;
            }
            debug!("update done");
        }

        unsafe fn clear(&self, address: Address) {
            SFT_DENSE_CHUNK_MAP_INDEX.store_atomic::<u8>(
                address,
                Self::EMPTY_SFT_INDEX,
                Ordering::SeqCst,
            );
        }
    }

    impl SFTDenseChunkMap {
        /// Empty space is at index 0
        const EMPTY_SFT_INDEX: u8 = 0;

        pub fn new() -> Self {
            Self {
                // Empty space is at index 0
                sft: vec![SFTRefStorage::default()],
                index_map: HashMap::new(),
            }
        }

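        /// Read the per-chunk side metadata byte for this address; its value is the index into
        /// `sft` of the space that owns the chunk (0 for the empty space).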
        pub fn addr_to_index(addr: Address) -> u8 {
            SFT_DENSE_CHUNK_MAP_INDEX.load_atomic::<u8>(addr, Ordering::Relaxed)
        }
    }
}

#[allow(dead_code)]
mod sparse_chunk_map {
    use super::*;
    use crate::util::conversions;
    use crate::util::conversions::*;
    use crate::util::heap::layout::vm_layout::vm_layout;
    use crate::util::heap::layout::vm_layout::BYTES_IN_CHUNK;

    /// The chunk map is a sparse table. It has one entry for each chunk in the address space we may use.
    pub struct SFTSparseChunkMap {
        sft: Vec<SFTRefStorage>,
    }

    unsafe impl Sync for SFTSparseChunkMap {}

    impl SFTMap for SFTSparseChunkMap {
        fn has_sft_entry(&self, addr: Address) -> bool {
            addr.chunk_index() < vm_layout().max_chunks()
        }

        fn get_side_metadata(&self) -> Option<&SideMetadataSpec> {
            None
        }

        fn get_checked(&self, address: Address) -> &dyn SFT {
            if self.has_sft_entry(address) {
                unsafe { self.get_unchecked(address) }
            } else {
                &EMPTY_SPACE_SFT
            }
        }

        unsafe fn get_unchecked(&self, address: Address) -> &dyn SFT {
            let cell = self.sft.get_unchecked(address.chunk_index());
            cell.load()
        }

        /// Update the SFT map for the given address range.
        /// It should be used when we acquire new memory and use it as part of a space. For example, the cases include:
        /// 1. when a space grows, 2. when initializing a contiguous space, 3. when ensure_mapped() is called on a space.
        unsafe fn update(
            &self,
            space: *const (dyn SFT + Sync + 'static),
            start: Address,
            bytes: usize,
        ) {
            if DEBUG_SFT {
                self.log_update(&*space, start, bytes);
            }
            let first = start.chunk_index();
            let last = conversions::chunk_align_up(start + bytes).chunk_index();
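            // `last` is exclusive: aligning the end address up to a chunk boundary ensures a
            // partially covered trailing chunk still gets an SFT entry.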
            for chunk in first..last {
                self.set(chunk, &*space);
            }
            if DEBUG_SFT {
                self.trace_sft_map();
            }
        }

        // TODO: We should clear an SFT entry when a space releases a chunk.
        #[allow(dead_code)]
        unsafe fn clear(&self, chunk_start: Address) {
            if DEBUG_SFT {
                debug!(
                    "Clear SFT for chunk {} (was {})",
                    chunk_start,
                    self.get_checked(chunk_start).name()
                );
            }
            assert!(chunk_start.is_aligned_to(BYTES_IN_CHUNK));
            let chunk_idx = chunk_start.chunk_index();
            self.set(chunk_idx, &EMPTY_SPACE_SFT);
        }
    }

    impl SFTSparseChunkMap {
        pub fn new() -> Self {
            SFTSparseChunkMap {
                sft: std::iter::repeat_with(SFTRefStorage::default)
                    .take(vm_layout().max_chunks())
                    .collect(),
            }
        }

        fn log_update(&self, space: &(dyn SFT + Sync + 'static), start: Address, bytes: usize) {
            debug!("Update SFT for Chunk {} as {}", start, space.name());
            let first = start.chunk_index();
            let start_chunk = chunk_index_to_address(first);
            debug!(
                "Update SFT for {} bytes of Chunk {} #{}",
                bytes, start_chunk, first
            );
        }

        fn trace_sft_map(&self) {
            trace!("{}", self.print_sft_map());
        }

        // This can be used during debugging to print the SFT map.
        fn print_sft_map(&self) -> String {
            // Print the entire SFT map.
            let mut res = String::new();

            const SPACE_PER_LINE: usize = 10;
            for i in (0..self.sft.len()).step_by(SPACE_PER_LINE) {
                let max = if i + SPACE_PER_LINE > self.sft.len() {
                    self.sft.len()
                } else {
                    i + SPACE_PER_LINE
                };
                let chunks: Vec<usize> = (i..max).collect();
                let space_names: Vec<&str> =
                    chunks.iter().map(|&x| self.sft[x].load().name()).collect();
                res.push_str(&format!(
                    "{}: {}",
                    chunk_index_to_address(i),
                    space_names.join(",")
                ));
                res.push('\n');
            }

            res
        }


        fn set(&self, chunk: usize, sft: &(dyn SFT + Sync + 'static)) {
            /*
             * This is safe (only) because a) this is only called during the
             * allocation and deallocation of chunks, which happens under a global
             * lock, and b) it only transitions from empty to valid and valid to
             * empty, so if there were a race to view the contents, in the one case
             * it would either see the new (valid) space or an empty space (both of
             * which are reasonable), and in the other case it would either see the
             * old (valid) space or an empty space, both of which are valid.
             */

            // It is okay to set empty to valid, or set valid to empty. It is wrong if we overwrite a valid value with a different valid value.
            if cfg!(debug_assertions) {
                let old = self.sft[chunk].load().name();
                let new = sft.name();
                // Allow overwriting the same SFT pointer. E.g., if we have set the SFT map for a space and ensure_mapped() is later
                // called on the same space, we still set the SFT map again.
                debug_assert!(
                    old == EMPTY_SFT_NAME || new == EMPTY_SFT_NAME || old == new,
                    "attempt to overwrite a non-empty chunk {} ({}) in SFT map (from {} to {})",
                    chunk,
                    crate::util::conversions::chunk_index_to_address(chunk),
                    old,
                    new
                );
            }
            unsafe { self.sft.get_unchecked(chunk).store(sft) };
        }
    }
}