// mmtk/util/os/imp/unix_like/unix_common.rs

use crate::util::address::Address;
use crate::util::constants::BYTES_IN_PAGE;
use crate::util::conversions::raw_align_up;
use crate::util::os::*;
use std::io::Result;

7impl MmapProtection {
8 fn get_native_flags(&self) -> i32 {
9 use libc::{PROT_EXEC, PROT_NONE, PROT_READ, PROT_WRITE};
10 match self {
11 Self::ReadWrite => PROT_READ | PROT_WRITE,
12 Self::ReadWriteExec => PROT_READ | PROT_WRITE | PROT_EXEC,
13 Self::NoAccess => PROT_NONE,
14 }
15 }
16}
17
18pub fn mmap(
19 start: Address,
20 size: usize,
21 strategy: MmapStrategy,
22 annotation: &MmapAnnotation<'_>,
23) -> MmapResult<Address> {
24 let ptr = start.to_mut_ptr();
25 let prot = strategy.prot.get_native_flags();
26 let flags = strategy.get_posix_mmap_flags(true);
27 wrap_libc_call(
28 &|| unsafe { libc::mmap(start.to_mut_ptr(), size, prot, flags, -1, 0) },
29 ptr,
30 )
31 .map_err(|e| MmapError::new(start, size, annotation, e))?;
32 Ok(start)
33}
34
/// Map `size` bytes at an OS-chosen address whose start is aligned to `align`.
///
/// The function over-allocates by `align` bytes so that an `align`-aligned
/// range of `size` bytes is guaranteed to exist inside the allocation, then
/// unmaps the unused leading and trailing padding so that only
/// `[aligned_start, aligned_start + size)` remains mapped.
///
/// Preconditions (debug-asserted): `align` is a power of two and a multiple
/// of the page size; `size` is a multiple of the page size.
pub fn mmap_anywhere(size: usize, align: usize, strategy: MmapStrategy) -> Result<Address> {
    debug_assert!(align.is_power_of_two());
    debug_assert!(align % BYTES_IN_PAGE == 0);
    debug_assert!(size % BYTES_IN_PAGE == 0);

    // Rounding `size` up to `align` and adding one extra `align` guarantees
    // the allocation contains an `align`-aligned sub-range of `size` bytes,
    // wherever the kernel places it.
    let aligned_size = raw_align_up(size, align);
    let alloc_size = aligned_size + align;
    let prot = strategy.prot.get_native_flags();
    // `false` — presumably requests non-fixed placement (kernel chooses the
    // address); confirm against `get_posix_mmap_flags`.
    let flags = strategy.get_posix_mmap_flags(false);

    let ptr = unsafe { libc::mmap(std::ptr::null_mut(), alloc_size, prot, flags, -1, 0) };
    if ptr == libc::MAP_FAILED {
        return Err(std::io::Error::last_os_error());
    }

    let start = Address::from_mut_ptr(ptr);
    let aligned_start = start.align_up(align);

    // Padding before the aligned range, and everything after the `size` bytes
    // we keep. Both are page-multiples: `start` is page-aligned (mmap),
    // `aligned_start` is `align`-aligned (hence page-aligned), and `size` /
    // `alloc_size` are page-multiples by the asserted preconditions.
    let leading_unaligned_size = aligned_start - start;
    let trailing_unaligned_size = alloc_size - leading_unaligned_size - size;

    if leading_unaligned_size > 0 {
        debug_assert!(leading_unaligned_size % BYTES_IN_PAGE == 0);
        munmap(start, leading_unaligned_size)?;
    }

    if trailing_unaligned_size > 0 {
        debug_assert!(trailing_unaligned_size % BYTES_IN_PAGE == 0);
        let trailing_start = aligned_start + size;
        munmap(trailing_start, trailing_unaligned_size)?;
    }

    Ok(aligned_start)
}
69
70pub fn is_mmap_oom(os_errno: i32) -> bool {
71 os_errno == libc::ENOMEM
72}
73
74pub fn munmap(start: Address, size: usize) -> Result<()> {
75 wrap_libc_call(&|| unsafe { libc::munmap(start.to_mut_ptr(), size) }, 0)
76}
77
78pub fn mprotect(start: Address, size: usize, prot: MmapProtection) -> Result<()> {
79 wrap_libc_call(
80 &|| unsafe { libc::mprotect(start.to_mut_ptr(), size, prot.get_native_flags()) },
81 0,
82 )
83}
84
/// OS-level process identifier (POSIX `pid_t`).
pub type ProcessIDType = libc::pid_t;
/// OS-level thread identifier (POSIX `pthread_t`).
pub type ThreadIDType = libc::pthread_t;
87
88pub fn get_process_id() -> Result<ProcessIDType> {
89 Ok(unsafe { libc::getpid() })
90}
91
92pub fn get_thread_id() -> Result<ThreadIDType> {
93 Ok(unsafe { libc::pthread_self() })
94}
95
/// Invoke a libc-style function and turn its return value into a `Result`.
///
/// If `f()` returns `expect`, the call is treated as a success; otherwise the
/// current `errno` (via `std::io::Error::last_os_error`) is returned as the
/// error.
pub fn wrap_libc_call<T: PartialEq>(f: &dyn Fn() -> T, expect: T) -> Result<()> {
    if f() == expect {
        Ok(())
    } else {
        Err(std::io::Error::last_os_error())
    }
}
104
#[cfg(all(test, target_os = "linux"))]
mod tests {
    use super::*;
    use crate::util::heap::layout::vm_layout::BYTES_IN_CHUNK;
    use crate::util::test_util::{serial_test, with_cleanup};
    use std::io::ErrorKind;

    /// Check whether `[start, start + size)` is currently mapped, and panic
    /// if that differs from `expect_mapped`.
    ///
    /// The probe attempts a fixed, non-replacing mmap over the range
    /// (`replace(false)` — presumably mapping to `MAP_FIXED_NOREPLACE`;
    /// confirm against `get_posix_mmap_flags`):
    /// - success means the range was free (the probe mapping is removed again);
    /// - `AlreadyExists` means the range was still mapped.
    fn assert_mapping_state(start: Address, size: usize, expect_mapped: bool) {
        let annotation = MmapAnnotation::Misc {
            name: "mmap_anywhere_test",
        };
        match mmap(
            start,
            size,
            MmapStrategy::QUARANTINE.replace(false),
            &annotation,
        ) {
            Ok(_) => {
                // Probe succeeded, so the range was unmapped. Remove the probe
                // mapping before asserting so it is not leaked on panic.
                let _ = munmap(start, size);
                assert!(
                    !expect_mapped,
                    "{start} of size {size} should still be mapped"
                );
            }
            Err(e) => {
                // Any error other than "already mapped" is a genuine failure.
                assert_eq!(e.error.kind(), ErrorKind::AlreadyExists);
                assert!(expect_mapped, "{start} of size {size} should be unmapped");
            }
        }
    }

    /// `mmap_anywhere` must return a chunk-aligned start and must have
    /// unmapped the over-allocated padding beyond `start + size`.
    #[test]
    fn mmap_anywhere_unmaps_alignment_padding() {
        serial_test(|| {
            // Page-aligned but not chunk-aligned, so trailing padding exists.
            let size = BYTES_IN_CHUNK + BYTES_IN_PAGE;
            let start = mmap_anywhere(size, BYTES_IN_CHUNK, MmapStrategy::QUARANTINE).unwrap();

            with_cleanup(
                || {
                    assert!(start.is_aligned_to(BYTES_IN_CHUNK));
                    // The last page of the requested range must still be mapped...
                    assert_mapping_state(start + size - BYTES_IN_PAGE, BYTES_IN_PAGE, true);
                    // ...and the first page past the range must be unmapped.
                    assert_mapping_state(start + size, BYTES_IN_PAGE, false);
                },
                || {
                    let _ = munmap(start, size);
                },
            );
        });
    }
}