// mmtk/scheduler/stat.rs

1//! Statistics for work packets
2use super::work_counter::{WorkCounter, WorkCounterBase, WorkDuration};
3#[cfg(feature = "perf_counter")]
4use crate::scheduler::work_counter::WorkPerfEvent;
5use crate::vm::VMBinding;
6use crate::MMTK;
7use std::any::TypeId;
8use std::collections::HashMap;
9use std::marker::PhantomData;
10use std::sync::atomic::{AtomicBool, Ordering};
11
/// Merge and print the work-packet level statistics from all worker threads
#[derive(Default)]
pub struct SchedulerStat {
    /// Map work packet type IDs to work packet names
    work_id_name_map: HashMap<TypeId, &'static str>,
    /// Count the number of work packets executed for different types
    work_counts: HashMap<TypeId, usize>,
    /// Collect work counters from work threads.
    /// Two dimensional vectors are used, e.g.
    /// `[[foo_0, ..., foo_n], ..., [bar_0, ..., bar_n]]`.
    /// The first dimension is for different types of work counters
    /// (`foo` and `bar` in the above example).
    /// The second dimension is for work counters of the same type but from
    /// different threads (`foo_0` and `bar_0` are from the same thread).
    /// The order of insertion is determined by when [`SchedulerStat::merge`] is
    /// called for each [`WorkerLocalStat`].
    /// We assume different threads have the same set of work counters
    /// (in the same order).
    work_counters: HashMap<TypeId, Vec<Vec<Box<dyn WorkCounter>>>>,
}
32
33impl SchedulerStat {
34    /// Extract the work-packet name from the full type name.
35    /// i.e. simplifies `crate::scheduler::gc_work::SomeWorkPacket<Semispace>` to `SomeWorkPacket`.
36    fn work_name(&self, name: &str) -> String {
37        let end_index = name.find('<').unwrap_or(name.len());
38        let name = name[..end_index].to_owned();
39        match name.rfind(':') {
40            Some(start_index) => name[(start_index + 1)..end_index].to_owned(),
41            _ => name,
42        }
43    }
44
45    /// Used during statistics printing at [`crate::memory_manager::harness_end`]
46    pub fn harness_stat(&self) -> HashMap<String, String> {
47        let mut stat = HashMap::new();
48        let mut counts = HashMap::<String, usize>::new();
49        let mut times = HashMap::<String, f64>::new();
50        // Work counts
51        let mut total_count = 0;
52        for (t, c) in &self.work_counts {
53            total_count += c;
54            let n = self.work_id_name_map[t];
55            // We can have the same work names for different TypeIDs since work names strip
56            // type parameters away, while the same work packet with different type parameters
57            // are given different TypeIDs. Hence, we check if the key exists and update instead of
58            // overwrite it
59            let pkt = format!("work.{}.count", self.work_name(n));
60            let val = counts.entry(pkt).or_default();
61            *val += c;
62        }
63        stat.insert("total-work.count".to_owned(), format!("{}", total_count));
64        // Work execution times
65        let mut duration_overall: WorkCounterBase = Default::default();
66        for (t, vs) in &self.work_counters {
67            // Name of the work packet type
68            let n = self.work_id_name_map[t];
69            // Iterate through different types of work counters
70            for v in vs.iter() {
71                // Aggregate work counters of the same type but from different
72                // worker threads
73                let fold = v
74                    .iter()
75                    .fold(Default::default(), |acc: WorkCounterBase, x| {
76                        acc.merge(x.get_base())
77                    });
78                // Update the overall execution time
79                duration_overall.merge_inplace(&fold);
80                let name = v.first().unwrap().name();
81                let pkt_total = format!("work.{}.{}.total", self.work_name(n), name);
82                let pkt_min = format!("work.{}.{}.min", self.work_name(n), name);
83                let pkt_max = format!("work.{}.{}.max", self.work_name(n), name);
84
85                // We can have the same work names for different TypeIDs since work names strip
86                // type parameters away, while the same work packet with different type parameters
87                // are given different TypeIDs. Hence, we check if the key exists and update
88                // instead of overwrite it
89                let val = times.entry(pkt_total).or_default();
90                *val += fold.total;
91                let val = times.entry(pkt_min).or_default();
92                *val = f64::min(*val, fold.min);
93                let val = times.entry(pkt_max).or_default();
94                *val = f64::max(*val, fold.max);
95            }
96        }
97        // Convert to ms and print out overall execution time
98        stat.insert(
99            "total-work.time.total".to_owned(),
100            format!("{:.3}", duration_overall.total / 1e6),
101        );
102        stat.insert(
103            "total-work.time.min".to_owned(),
104            format!("{:.3}", duration_overall.min / 1e6),
105        );
106        stat.insert(
107            "total-work.time.max".to_owned(),
108            format!("{:.3}", duration_overall.max / 1e6),
109        );
110
111        for (pkt, count) in counts {
112            stat.insert(pkt, format!("{}", count));
113        }
114
115        for (pkt, time) in times {
116            stat.insert(pkt, format!("{:.3}", time / 1e6));
117        }
118
119        stat
120    }
121    /// Merge work counters from different worker threads
122    pub fn merge<C>(&mut self, stat: &WorkerLocalStat<C>) {
123        // Merge work packet type ID to work packet name mapping
124        for (id, name) in &stat.work_id_name_map {
125            self.work_id_name_map.insert(*id, *name);
126        }
127        // Merge work count for different work packet types
128        for (id, count) in &stat.work_counts {
129            if self.work_counts.contains_key(id) {
130                *self.work_counts.get_mut(id).unwrap() += *count;
131            } else {
132                self.work_counts.insert(*id, *count);
133            }
134        }
135        // Merge work counter for different work packet types
136        for (id, counters) in &stat.work_counters {
137            // Initialize the two dimensional vector
138            // [
139            //    [], // foo counter
140            //    [], // bar counter
141            // ]
142            let vs = self
143                .work_counters
144                .entry(*id)
145                .or_insert_with(|| vec![vec![]; counters.len()]);
146            // [
147            //    [counters[0] of type foo],
148            //    [counters[1] of type bar]
149            // ]
150            for (v, c) in vs.iter_mut().zip(counters.iter()) {
151                v.push(c.clone());
152            }
153        }
154    }
155}
156
/// Describing a single work packet
pub struct WorkStat {
    /// Type ID of the work packet type; key into the per-worker stat maps
    type_id: TypeId,
    /// Full type name of the work packet type
    type_name: &'static str,
}
162
163impl WorkStat {
164    /// Stop all work counters for the work packet type of the just executed
165    /// work packet
166    pub fn end_of_work<VM: VMBinding>(&self, worker_stat: &mut WorkerLocalStat<VM>) {
167        if !worker_stat.is_enabled() {
168            return;
169        };
170        // Insert type ID, name pair
171        worker_stat
172            .work_id_name_map
173            .insert(self.type_id, self.type_name);
174        // Increment work count
175        *worker_stat.work_counts.entry(self.type_id).or_insert(0) += 1;
176        // Stop counters
177        worker_stat
178            .work_counters
179            .entry(self.type_id)
180            .and_modify(|v| {
181                v.iter_mut().for_each(|c| c.stop());
182            });
183    }
184}
185
/// Worker thread local counterpart of [`SchedulerStat`]
pub struct WorkerLocalStat<C> {
    /// Map work packet type IDs to work packet names
    work_id_name_map: HashMap<TypeId, &'static str>,
    /// Number of executed work packets per work packet type
    work_counts: HashMap<TypeId, usize>,
    /// One counter of each kind per work packet type; started in
    /// `measure_work` and stopped in [`WorkStat::end_of_work`]
    work_counters: HashMap<TypeId, Vec<Box<dyn WorkCounter>>>,
    /// Whether statistics are recorded; checked before any bookkeeping
    enabled: AtomicBool,
    /// `C` is not stored at runtime; it only parameterizes the type
    _phantom: PhantomData<C>,
}
194
// SAFETY: NOTE(review): this asserts `Send` even though `Box<dyn WorkCounter>`
// carries no `Send` bound — presumably each `WorkerLocalStat` is only handed
// off between threads whole (never shared), and all `WorkCounter`
// implementations are safe to move across threads. Confirm this invariant.
unsafe impl<C> Send for WorkerLocalStat<C> {}
196
197impl<C> Default for WorkerLocalStat<C> {
198    fn default() -> Self {
199        WorkerLocalStat {
200            work_id_name_map: Default::default(),
201            work_counts: Default::default(),
202            work_counters: Default::default(),
203            enabled: AtomicBool::new(false),
204            _phantom: Default::default(),
205        }
206    }
207}
208
impl<VM: VMBinding> WorkerLocalStat<VM> {
    /// Whether statistics recording is currently turned on for this worker.
    pub fn is_enabled(&self) -> bool {
        self.enabled.load(Ordering::SeqCst)
    }

    /// Turn on statistics recording for this worker.
    pub fn enable(&self) {
        self.enabled.store(true, Ordering::SeqCst);
    }

    /// Measure the execution of a work packet by starting all counters for that
    /// type
    pub fn measure_work(
        &mut self,
        work_id: TypeId,
        work_name: &'static str,
        mmtk: &'static MMTK<VM>,
    ) -> WorkStat {
        // Start (creating on first use) the counter set for this packet type,
        // but only when statistics collection is on.
        if self.is_enabled() {
            let counters = self
                .work_counters
                .entry(work_id)
                .or_insert_with(|| Self::counter_set(mmtk));
            for counter in counters.iter_mut() {
                counter.start();
            }
        }
        WorkStat {
            type_id: work_id,
            type_name: work_name,
        }
    }

    // Build the set of counters attached to each work packet type: always a
    // duration counter, plus one perf-event counter per configured event when
    // the `perf_counter` feature is enabled.
    #[allow(unused_variables, unused_mut)]
    fn counter_set(mmtk: &'static MMTK<VM>) -> Vec<Box<dyn WorkCounter>> {
        let mut counters: Vec<Box<dyn WorkCounter>> = Vec::new();
        counters.push(Box::new(WorkDuration::new()));
        #[cfg(feature = "perf_counter")]
        for e in &mmtk.options.work_perf_events.events {
            counters.push(Box::new(WorkPerfEvent::new(
                &e.0,
                e.1,
                e.2,
                *mmtk.options.perf_exclude_kernel,
            )));
        }
        counters
    }
252}