// mmtk/util/os/imp/unix_like/linux_like/linux_common.rs
use crate::util::address::Address;
2use crate::util::os::imp::unix_like::unix_common;
3use crate::util::os::*;
4use libc::{cpu_set_t, sched_getaffinity, sched_setaffinity, CPU_COUNT, CPU_SET, CPU_ZERO};
5use std::io::Result;
6
7pub fn set_vma_name(start: Address, size: usize, annotation: &MmapAnnotation) {
8 let anno_str = annotation.to_string();
15 let anno_cstr = std::ffi::CString::new(anno_str).unwrap();
16 let result = unix_common::wrap_libc_call(
17 &|| unsafe {
18 libc::prctl(
19 libc::PR_SET_VMA,
20 libc::PR_SET_VMA_ANON_NAME,
21 start.to_ptr::<libc::c_void>(),
22 size,
23 anno_cstr.as_ptr(),
24 )
25 },
26 0,
27 );
28 if let Err(e) = result {
29 debug!("Error while calling prctl: {e}");
30 }
31}
32
33pub fn set_hugepage(start: Address, size: usize, options: HugePageSupport) -> Result<()> {
35 match options {
36 HugePageSupport::No => Ok(()),
37 HugePageSupport::TransparentHugePages => unix_common::wrap_libc_call(
38 &|| unsafe { libc::madvise(start.to_mut_ptr(), size, libc::MADV_HUGEPAGE) },
39 0,
40 ),
41 }
42}
43
44impl MmapStrategy {
45 pub fn get_posix_mmap_flags(&self) -> i32 {
47 let mut flags = libc::MAP_PRIVATE | libc::MAP_ANONYMOUS;
48 if self.replace {
49 flags |= libc::MAP_FIXED;
50 } else {
51 flags |= libc::MAP_FIXED_NOREPLACE
52 }
53 if !self.reserve {
54 flags |= libc::MAP_NORESERVE;
55 }
56 flags
57 }
58}
59
/// Return the contents of `/proc/self/maps` — the current process's memory
/// mappings — as a single string (Linux-specific).
pub fn get_process_memory_maps() -> Result<String> {
    // `read_to_string` opens the pseudo-file and reads it to EOF in one call,
    // propagating any I/O error to the caller.
    std::fs::read_to_string("/proc/self/maps")
}
71
72pub fn get_total_num_cpus() -> CoreNum {
73 use std::mem::MaybeUninit;
74 unsafe {
75 let mut cs = MaybeUninit::zeroed().assume_init();
76 CPU_ZERO(&mut cs);
77 sched_getaffinity(0, std::mem::size_of::<cpu_set_t>(), &mut cs);
78 CPU_COUNT(&cs) as u16
79 }
80}
81
82pub fn bind_current_thread_to_core(core_id: CoreId) {
83 use std::mem::MaybeUninit;
84 unsafe {
85 let mut cs = MaybeUninit::zeroed().assume_init();
86 CPU_ZERO(&mut cs);
87 CPU_SET(core_id as usize, &mut cs);
88 sched_setaffinity(0, std::mem::size_of::<cpu_set_t>(), &cs);
89 }
90}
91
92pub fn bind_current_thread_to_cpuset(cpuset: &[CoreId]) {
93 use std::mem::MaybeUninit;
94 unsafe {
95 let mut cs = MaybeUninit::zeroed().assume_init();
96 CPU_ZERO(&mut cs);
97 for cpu in cpuset {
98 CPU_SET(*cpu as usize, &mut cs);
99 }
100 sched_setaffinity(0, std::mem::size_of::<cpu_set_t>(), &cs);
101 }
102}
103
104pub fn dzmmap(
105 start: Address,
106 size: usize,
107 strategy: MmapStrategy,
108 annotation: &MmapAnnotation<'_>,
109) -> MmapResult<Address> {
110 let addr = unix_common::mmap(start, size, strategy, annotation)?;
111
112 if !cfg!(feature = "no_mmap_annotation") {
113 set_vma_name(addr, size, annotation);
114 }
115
116 set_hugepage(addr, size, strategy.huge_page).expect("Failed to set huge page option");
117
118 Ok(addr)
121}
122
123pub fn panic_if_unmapped(start: Address, size: usize) {
124 let strategy = MmapStrategy {
125 huge_page: HugePageSupport::No,
126 prot: MmapProtection::ReadWrite,
127 replace: false,
128 reserve: true,
129 };
130 let annotation = MmapAnnotation::Misc {
131 name: "panic_if_unmapped",
132 };
133 match unix_common::mmap(start, size, strategy, &annotation) {
134 Ok(_) => panic!("{} of size {} is not mapped", start, size),
135 Err(e) => {
136 assert!(
137 e.error.kind() == std::io::ErrorKind::AlreadyExists,
138 "Failed to check mapped: {:?}",
139 e
140 );
141 }
142 }
143}