// mmtk/util/os/imp/unix_like/linux_like/linux_common.rs
use crate::util::address::Address;
use crate::util::os::imp::unix_like::unix_common;
use crate::util::os::*;
use libc::{cpu_set_t, sched_getaffinity, sched_setaffinity, CPU_COUNT, CPU_SET, CPU_ZERO};
use std::io::Result;
7pub fn set_vma_name(start: Address, size: usize, annotation: &MmapAnnotation) {
8 let anno_str = annotation.to_string();
15 let anno_cstr = std::ffi::CString::new(anno_str).unwrap();
16 let result = unix_common::wrap_libc_call(
17 &|| unsafe {
18 libc::prctl(
19 libc::PR_SET_VMA,
20 libc::PR_SET_VMA_ANON_NAME,
21 start.to_ptr::<libc::c_void>(),
22 size,
23 anno_cstr.as_ptr(),
24 )
25 },
26 0,
27 );
28 if let Err(e) = result {
29 debug!("Error while calling prctl: {e}");
30 }
31}
32
33pub fn set_hugepage(start: Address, size: usize, options: HugePageSupport) -> Result<()> {
35 match options {
36 HugePageSupport::No => Ok(()),
37 HugePageSupport::TransparentHugePages => unix_common::wrap_libc_call(
38 &|| unsafe { libc::madvise(start.to_mut_ptr(), size, libc::MADV_HUGEPAGE) },
39 0,
40 ),
41 }
42}
43
44impl MmapStrategy {
45 pub fn get_posix_mmap_flags(&self, fixed: bool) -> i32 {
47 let mut flags = libc::MAP_PRIVATE | libc::MAP_ANONYMOUS;
48 if fixed {
49 if self.replace {
50 flags |= libc::MAP_FIXED;
51 } else {
52 flags |= libc::MAP_FIXED_NOREPLACE
53 }
54 }
55 if !self.reserve {
56 flags |= libc::MAP_NORESERVE;
57 }
58 flags
59 }
60}
61
/// Return the current process's memory mappings, i.e. the contents of
/// `/proc/self/maps`, as a single string.
///
/// # Errors
/// Propagates any I/O error from opening or reading the proc file.
pub fn get_process_memory_maps() -> Result<String> {
    // `fs::read_to_string` opens, reads, and closes in one call, replacing
    // the manual File::open + read_to_string + scratch-buffer sequence.
    std::fs::read_to_string("/proc/self/maps")
}
73
74pub fn get_total_num_cpus() -> CoreNum {
75 use std::mem::MaybeUninit;
76 unsafe {
77 let mut cs = MaybeUninit::zeroed().assume_init();
78 CPU_ZERO(&mut cs);
79 sched_getaffinity(0, std::mem::size_of::<cpu_set_t>(), &mut cs);
80 CPU_COUNT(&cs) as u16
81 }
82}
83
84pub fn bind_current_thread_to_core(core_id: CoreId) {
85 use std::mem::MaybeUninit;
86 unsafe {
87 let mut cs = MaybeUninit::zeroed().assume_init();
88 CPU_ZERO(&mut cs);
89 CPU_SET(core_id as usize, &mut cs);
90 sched_setaffinity(0, std::mem::size_of::<cpu_set_t>(), &cs);
91 }
92}
93
94pub fn bind_current_thread_to_cpuset(cpuset: &[CoreId]) {
95 use std::mem::MaybeUninit;
96 unsafe {
97 let mut cs = MaybeUninit::zeroed().assume_init();
98 CPU_ZERO(&mut cs);
99 for cpu in cpuset {
100 CPU_SET(*cpu as usize, &mut cs);
101 }
102 sched_setaffinity(0, std::mem::size_of::<cpu_set_t>(), &cs);
103 }
104}
105
106pub fn dzmmap(
107 start: Address,
108 size: usize,
109 strategy: MmapStrategy,
110 annotation: &MmapAnnotation<'_>,
111) -> MmapResult<Address> {
112 let addr = unix_common::mmap(start, size, strategy, annotation)?;
113
114 if !cfg!(feature = "no_mmap_annotation") {
115 set_vma_name(addr, size, annotation);
116 }
117
118 set_hugepage(addr, size, strategy.huge_page).expect("Failed to set huge page option");
119
120 Ok(addr)
123}
124
125pub fn dzmmap_anywhere(
126 size: usize,
127 align: usize,
128 strategy: MmapStrategy,
129 annotation: &MmapAnnotation<'_>,
130) -> Result<Address> {
131 let addr = unix_common::mmap_anywhere(size, align, strategy)?;
132 if !cfg!(feature = "no_mmap_annotation") {
133 set_vma_name(addr, size, annotation);
134 }
135 Ok(addr)
136}
137
138pub fn panic_if_unmapped(start: Address, size: usize) {
139 let strategy = MmapStrategy {
140 huge_page: HugePageSupport::No,
141 prot: MmapProtection::ReadWrite,
142 replace: false,
143 reserve: true,
144 };
145 let annotation = MmapAnnotation::Misc {
146 name: "panic_if_unmapped",
147 };
148 match unix_common::mmap(start, size, strategy, &annotation) {
149 Ok(_) => panic!("{} of size {} is not mapped", start, size),
150 Err(e) => {
151 assert!(
152 e.error.kind() == std::io::ErrorKind::AlreadyExists,
153 "Failed to check mapped: {:?}",
154 e
155 );
156 }
157 }
158}