// mmtk/util/statistics/stats.rs

1use crate::mmtk::MMTK;
2use crate::util::options::Options;
3use crate::util::statistics::counter::*;
4use crate::util::statistics::Timer;
5use crate::vm::VMBinding;
6
7#[cfg(feature = "perf_counter")]
8use pfm::Perfmon;
9use std::collections::HashMap;
10use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
11use std::sync::Arc;
12use std::sync::Mutex;
13
/// The default number of phases for statistics.
pub const DEFAULT_NUM_PHASES: usize = 1 << 12;
/// The maximum number of counters. Not referenced in this file;
/// presumably an upper bound enforced where counters are stored — TODO confirm at use sites.
pub const MAX_COUNTERS: usize = 100;
17
/// GC stats shared among counters
pub struct SharedStats {
    // Monotonically increasing phase index; bumped once per phase boundary.
    phase: AtomicUsize,
    // True while statistics are actively being gathered.
    gathering_stats: AtomicBool,
}

impl SharedStats {
    /// Return the current phase index.
    pub fn get_phase(&self) -> usize {
        self.phase.load(Ordering::SeqCst)
    }

    /// Advance to the next phase.
    fn increment_phase(&self) {
        self.phase.fetch_add(1, Ordering::SeqCst);
    }

    /// Return whether statistics are currently being gathered.
    pub fn get_gathering_stats(&self) -> bool {
        self.gathering_stats.load(Ordering::SeqCst)
    }

    /// Switch statistics gathering on or off.
    fn set_gathering_stats(&self, enabled: bool) {
        self.gathering_stats.store(enabled, Ordering::SeqCst);
    }
}
41
/// GC statistics
///
/// The struct holds basic GC statistics, like the GC count,
/// and an array of counters.
pub struct Stats {
    // Number of GCs started (incremented by `start_gc`).
    gc_count: AtomicUsize,
    // The always-enabled "time" counter created in `new`; it is also
    // stored in `counters`, so it participates in phase changes like the rest.
    total_time: Arc<Mutex<Timer>>,
    // crate `pfm` uses libpfm4 under the hood for parsing perf event names
    // Initialization of libpfm4 is required before we can use `PerfEvent` types
    #[cfg(feature = "perf_counter")]
    perfmon: Perfmon,
    // Phase index and gathering flag, shared with every counter.
    pub shared: Arc<SharedStats>,
    // All registered counters, including `total_time`.
    counters: Mutex<Vec<Arc<Mutex<dyn Counter + Send>>>>,
}
56
57impl Stats {
58    #[allow(unused)]
59    pub fn new(options: &Options) -> Self {
60        // Create a perfmon instance and initialize it
61        // we use perfmon to parse perf event names
62        #[cfg(feature = "perf_counter")]
63        let perfmon = {
64            let mut perfmon: Perfmon = Default::default();
65            perfmon.initialize().expect("Perfmon failed to initialize");
66            perfmon
67        };
68        let shared = Arc::new(SharedStats {
69            phase: AtomicUsize::new(0),
70            gathering_stats: AtomicBool::new(false),
71        });
72        let mut counters: Vec<Arc<Mutex<dyn Counter + Send>>> = vec![];
73        // We always have a time counter enabled
74        let t = Arc::new(Mutex::new(LongCounter::new(
75            "time".to_string(),
76            shared.clone(),
77            true,
78            false,
79            MonotoneNanoTime {},
80        )));
81        counters.push(t.clone());
82        // Read from the MMTK option for a list of perf events we want to
83        // measure, and create corresponding counters
84        #[cfg(feature = "perf_counter")]
85        for e in &options.phase_perf_events.events {
86            counters.push(Arc::new(Mutex::new(LongCounter::new(
87                e.0.clone(),
88                shared.clone(),
89                true,
90                false,
91                PerfEventDiffable::new(&e.0, *options.perf_exclude_kernel),
92            ))));
93        }
94        Stats {
95            gc_count: AtomicUsize::new(0),
96            total_time: t,
97            #[cfg(feature = "perf_counter")]
98            perfmon,
99            shared,
100            counters: Mutex::new(counters),
101        }
102    }
103
104    pub fn new_event_counter(
105        &self,
106        name: &str,
107        implicit_start: bool,
108        merge_phases: bool,
109    ) -> Arc<Mutex<EventCounter>> {
110        let mut guard = self.counters.lock().unwrap();
111        let counter = Arc::new(Mutex::new(EventCounter::new(
112            name.to_string(),
113            self.shared.clone(),
114            implicit_start,
115            merge_phases,
116        )));
117        guard.push(counter.clone());
118        counter
119    }
120
121    pub fn new_size_counter(
122        &self,
123        name: &str,
124        implicit_start: bool,
125        merge_phases: bool,
126    ) -> Mutex<SizeCounter> {
127        let u = self.new_event_counter(name, implicit_start, merge_phases);
128        let v = self.new_event_counter(&format!("{}.volume", name), implicit_start, merge_phases);
129        Mutex::new(SizeCounter::new(u, v))
130    }
131
132    pub fn new_timer(
133        &self,
134        name: &str,
135        implicit_start: bool,
136        merge_phases: bool,
137    ) -> Arc<Mutex<Timer>> {
138        let mut guard = self.counters.lock().unwrap();
139        let counter = Arc::new(Mutex::new(Timer::new(
140            name.to_string(),
141            self.shared.clone(),
142            implicit_start,
143            merge_phases,
144            MonotoneNanoTime {},
145        )));
146        guard.push(counter.clone());
147        counter
148    }
149
150    pub fn start_gc(&self) {
151        self.gc_count.fetch_add(1, Ordering::SeqCst);
152        if !self.get_gathering_stats() {
153            return;
154        }
155        let counters = self.counters.lock().unwrap();
156        for counter in &(*counters) {
157            counter.lock().unwrap().phase_change(self.get_phase());
158        }
159        self.shared.increment_phase();
160    }
161
162    pub fn end_gc(&self) {
163        if !self.get_gathering_stats() {
164            return;
165        }
166        let counters = self.counters.lock().unwrap();
167        for counter in &(*counters) {
168            counter.lock().unwrap().phase_change(self.get_phase());
169        }
170        self.shared.increment_phase();
171    }
172
173    pub fn print_stats<VM: VMBinding>(&self, mmtk: &'static MMTK<VM>) {
174        println!(
175            "============================ MMTk Statistics Totals ============================"
176        );
177        let scheduler_stat = mmtk.scheduler.statistics();
178        self.print_column_names(&scheduler_stat);
179        print!("{}\t", self.get_phase() / 2);
180        let counter = self.counters.lock().unwrap();
181        for iter in &(*counter) {
182            let c = iter.lock().unwrap();
183            if c.merge_phases() {
184                c.print_total(None);
185            } else {
186                c.print_total(Some(true));
187                print!("\t");
188                c.print_total(Some(false));
189            }
190            print!("\t");
191        }
192        for value in scheduler_stat.values() {
193            print!("{}\t", value);
194        }
195        println!();
196        print!("Total time: ");
197        self.total_time.lock().unwrap().print_total(None);
198        println!(" ms");
199        println!("------------------------------ End MMTk Statistics -----------------------------")
200    }
201
202    pub fn print_column_names(&self, scheduler_stat: &HashMap<String, String>) {
203        print!("GC\t");
204        let counter = self.counters.lock().unwrap();
205        for iter in &(*counter) {
206            let c = iter.lock().unwrap();
207            if c.merge_phases() {
208                print!("{}\t", c.name());
209            } else {
210                print!("{}.other\t{}.stw\t", c.name(), c.name());
211            }
212        }
213        for name in scheduler_stat.keys() {
214            print!("{}\t", name);
215        }
216        println!();
217    }
218
219    pub fn start_all(&self) {
220        let counters = self.counters.lock().unwrap();
221        if self.get_gathering_stats() {
222            panic!("calling Stats.startAll() while stats running");
223        }
224        self.shared.set_gathering_stats(true);
225
226        for c in &(*counters) {
227            let mut ctr = c.lock().unwrap();
228            if ctr.implicitly_start() {
229                ctr.start();
230            }
231        }
232    }
233
234    pub fn stop_all<VM: VMBinding>(&self, mmtk: &'static MMTK<VM>) {
235        self.stop_all_counters();
236        self.print_stats(mmtk);
237    }
238
239    fn stop_all_counters(&self) {
240        let counters = self.counters.lock().unwrap();
241        for c in &(*counters) {
242            c.lock().unwrap().stop();
243        }
244        self.shared.set_gathering_stats(false);
245    }
246
247    fn get_phase(&self) -> usize {
248        self.shared.get_phase()
249    }
250
251    pub fn get_gathering_stats(&self) -> bool {
252        self.shared.get_gathering_stats()
253    }
254}