// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <math.h>
#include <string.h>
#include "counts.h"
#include "cpumap.h"
#include "debug.h"
#include "header.h"
#include "stat.h"
#include "session.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"
#include <linux/zalloc.h>

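/*
 * Running statistics via Welford's online algorithm: each new value
 * updates the mean incrementally and accumulates M2, the sum of squared
 * deviations from the current mean, so the variance can be derived
 * later without storing the samples.
 *
 * Worked example (illustrative only): feeding 1, 2 and 3 gives
 * mean = 2 and M2 = 2, hence sample variance M2/(n - 1) = 1.
 */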
void update_stats(struct stats *stats, u64 val)
{
	double delta;

	stats->n++;
	delta = val - stats->mean;
	stats->mean += delta / stats->n;
	stats->M2 += delta*(val - stats->mean);

	if (val > stats->max)
		stats->max = val;

	if (val < stats->min)
		stats->min = val;
}

double avg_stats(struct stats *stats)
{
	return stats->mean;
}

/*
 * http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
 *
 *       (\Sum n_i^2) - ((\Sum n_i)^2)/n
 * s^2 = -------------------------------
 *                  n - 1
 *
 * http://en.wikipedia.org/wiki/Stddev
 *
 * The std dev of the mean is related to the std dev by:
 *
 *             s
 * s_mean = -------
 *          sqrt(n)
 *
 * Note that stddev_stats() below returns s_mean (the std dev of the
 * mean), not s itself.
 */
double stddev_stats(struct stats *stats)
{
	double variance, variance_mean;

	if (stats->n < 2)
		return 0.0;

	variance = stats->M2 / (stats->n - 1);
	variance_mean = variance / stats->n;

	return sqrt(variance_mean);
}

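/*
 * Relative stddev as a percentage of the mean; 0 when the mean is 0.
 * Continuing the example above (mean = 2, s_mean = sqrt(1/3) ~= 0.577),
 * rel_stddev_stats(0.577, 2) ~= 28.9%.
 */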
double rel_stddev_stats(double stddev, double avg)
{
	double pct = 0.0;

	if (avg)
		pct = 100.0 * stddev/avg;

	return pct;
}

bool __perf_evsel_stat__is(struct evsel *evsel,
			   enum perf_stat_evsel_id id)
{
	struct perf_stat_evsel *ps = evsel->stats;

	return ps->id == id;
}

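/*
 * Map special event names to perf_stat_evsel_id values. The ID() macro
 * stringizes its second argument, e.g. ID(SMI_NUM, msr/smi/) expands to
 * [PERF_STAT_EVSEL_ID__SMI_NUM] = "msr/smi/".
 */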
#define ID(id, name) [PERF_STAT_EVSEL_ID__##id] = #name
static const char *id_str[PERF_STAT_EVSEL_ID__MAX] = {
	ID(NONE,		x),
	ID(CYCLES_IN_TX,	cpu/cycles-t/),
	ID(TRANSACTION_START,	cpu/tx-start/),
	ID(ELISION_START,	cpu/el-start/),
	ID(CYCLES_IN_TX_CP,	cpu/cycles-ct/),
	ID(TOPDOWN_TOTAL_SLOTS, topdown-total-slots),
	ID(TOPDOWN_SLOTS_ISSUED, topdown-slots-issued),
	ID(TOPDOWN_SLOTS_RETIRED, topdown-slots-retired),
	ID(TOPDOWN_FETCH_BUBBLES, topdown-fetch-bubbles),
	ID(TOPDOWN_RECOVERY_BUBBLES, topdown-recovery-bubbles),
	ID(TOPDOWN_RETIRING, topdown-retiring),
	ID(TOPDOWN_BAD_SPEC, topdown-bad-spec),
	ID(TOPDOWN_FE_BOUND, topdown-fe-bound),
	ID(TOPDOWN_BE_BOUND, topdown-be-bound),
	ID(SMI_NUM, msr/smi/),
	ID(APERF, msr/aperf/),
};
#undef ID

static void perf_stat_evsel_id_init(struct evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->stats;
	int i;

	/* ps->id is 0 hence PERF_STAT_EVSEL_ID__NONE by default */

	for (i = 0; i < PERF_STAT_EVSEL_ID__MAX; i++) {
		if (!strcmp(evsel__name(evsel), id_str[i])) {
			ps->id = i;
			break;
		}
	}
}

static void evsel__reset_stat_priv(struct evsel *evsel)
{
	int i;
	struct perf_stat_evsel *ps = evsel->stats;

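	/*
	 * res_stats[0..2] track running averages of the aggregated
	 * val/ena/run triple, as fed by perf_stat_process_counter()
	 * (a reading of the code, not documented elsewhere).
	 */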
	for (i = 0; i < 3; i++)
		init_stats(&ps->res_stats[i]);

	perf_stat_evsel_id_init(evsel);
}

static int evsel__alloc_stat_priv(struct evsel *evsel)
{
	evsel->stats = zalloc(sizeof(struct perf_stat_evsel));
	if (evsel->stats == NULL)
		return -ENOMEM;
	evsel__reset_stat_priv(evsel);
	return 0;
}

static void evsel__free_stat_priv(struct evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->stats;

	if (ps)
		zfree(&ps->group_data);
	zfree(&evsel->stats);
}

static int evsel__alloc_prev_raw_counts(struct evsel *evsel, int ncpus, int nthreads)
{
	struct perf_counts *counts;

	counts = perf_counts__new(ncpus, nthreads);
	if (counts)
		evsel->prev_raw_counts = counts;

	return counts ? 0 : -ENOMEM;
}

static void evsel__free_prev_raw_counts(struct evsel *evsel)
{
	perf_counts__delete(evsel->prev_raw_counts);
	evsel->prev_raw_counts = NULL;
}

static void evsel__reset_prev_raw_counts(struct evsel *evsel)
{
	if (evsel->prev_raw_counts)
		perf_counts__reset(evsel->prev_raw_counts);
}

static int evsel__alloc_stats(struct evsel *evsel, bool alloc_raw)
{
	int ncpus = evsel__nr_cpus(evsel);
	int nthreads = perf_thread_map__nr(evsel->core.threads);

	if (evsel__alloc_stat_priv(evsel) < 0 ||
	    evsel__alloc_counts(evsel, ncpus, nthreads) < 0 ||
	    (alloc_raw && evsel__alloc_prev_raw_counts(evsel, ncpus, nthreads) < 0))
		return -ENOMEM;

	return 0;
}

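/*
 * On partial failure, evsel__alloc_stats() above does not unwind its
 * own allocations; the error path here frees stats for the whole
 * evlist instead, and the free helpers tolerate NULL, so evsels that
 * were never allocated are fine.
 */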
int perf_evlist__alloc_stats(struct evlist *evlist, bool alloc_raw)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel__alloc_stats(evsel, alloc_raw))
			goto out_free;
	}

	return 0;

out_free:
	perf_evlist__free_stats(evlist);
	return -1;
}

void perf_evlist__free_stats(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		evsel__free_stat_priv(evsel);
		evsel__free_counts(evsel);
		evsel__free_prev_raw_counts(evsel);
	}
}

void perf_evlist__reset_stats(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		evsel__reset_stat_priv(evsel);
		evsel__reset_counts(evsel);
	}
}

void perf_evlist__reset_prev_raw_counts(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		evsel__reset_prev_raw_counts(evsel);
}

static void perf_evsel__copy_prev_raw_counts(struct evsel *evsel)
{
	int ncpus = evsel__nr_cpus(evsel);
	int nthreads = perf_thread_map__nr(evsel->core.threads);

	for (int thread = 0; thread < nthreads; thread++) {
		for (int cpu = 0; cpu < ncpus; cpu++) {
			*perf_counts(evsel->counts, cpu, thread) =
				*perf_counts(evsel->prev_raw_counts, cpu,
					     thread);
		}
	}

	evsel->counts->aggr = evsel->prev_raw_counts->aggr;
}

void perf_evlist__copy_prev_raw_counts(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		perf_evsel__copy_prev_raw_counts(evsel);
}

void perf_evlist__save_aggr_prev_raw_counts(struct evlist *evlist)
{
	struct evsel *evsel;

	/*
	 * To collect the overall statistics for interval mode,
	 * we copy the counts from evsel->prev_raw_counts to
	 * evsel->counts. perf_stat_process_counter() creates
	 * aggregate values from the per-CPU values, but the
	 * per-CPU values are 0 for AGGR_GLOBAL. So the trick is
	 * to save the previous aggregate value in the first
	 * member of perf_counts, so that the aggregate
	 * calculation in process_counter_values() works
	 * correctly.
	 */
	evlist__for_each_entry(evlist, evsel) {
		*perf_counts(evsel->prev_raw_counts, 0, 0) =
			evsel->prev_raw_counts->aggr;
	}
}

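/*
 * Per-package events (counter->per_pkg) must be counted only once per
 * package: per_pkg_mask is a bitmap with one bit per socket, set once a
 * package has already contributed a value in the current round.
 */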
static void zero_per_pkg(struct evsel *counter)
{
	if (counter->per_pkg_mask)
		memset(counter->per_pkg_mask, 0, cpu__max_cpu());
}

static int check_per_pkg(struct evsel *counter,
			 struct perf_counts_values *vals, int cpu, bool *skip)
{
	unsigned long *mask = counter->per_pkg_mask;
	struct perf_cpu_map *cpus = evsel__cpus(counter);
	int s;

	*skip = false;

	if (!counter->per_pkg)
		return 0;

	if (perf_cpu_map__empty(cpus))
		return 0;

	if (!mask) {
		mask = zalloc(cpu__max_cpu());
		if (!mask)
			return -ENOMEM;

		counter->per_pkg_mask = mask;
	}

	/*
	 * We do not consider an event that has not run as a good
	 * instance to mark a package as used (skip = true). Otherwise
	 * we may run into a situation where the first CPU in a package
	 * is not running anything, yet the second one is, and this
	 * function would mark the package as used after seeing the
	 * first CPU and would not read the values from the second one.
	 */
	if (!(vals->run && vals->ena))
		return 0;

	s = cpu_map__get_socket(cpus, cpu, NULL);
	if (s < 0)
		return -1;

	*skip = test_and_set_bit(s, mask) == 1;
	return 0;
}

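/*
 * Fold one (cpu, thread) reading into the evsel. For the per-cpu/thread
 * aggregation modes, deltas and scaling are applied here per value; for
 * AGGR_GLOBAL the raw values are only summed, and delta/scale handling
 * is deferred to perf_stat_process_counter().
 */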
static int
process_counter_values(struct perf_stat_config *config, struct evsel *evsel,
		       int cpu, int thread,
		       struct perf_counts_values *count)
{
	struct perf_counts_values *aggr = &evsel->counts->aggr;
	static struct perf_counts_values zero;
	bool skip = false;

	if (check_per_pkg(evsel, count, cpu, &skip)) {
		pr_err("failed to read per-pkg counter\n");
		return -1;
	}

	if (skip)
		count = &zero;

	switch (config->aggr_mode) {
	case AGGR_THREAD:
	case AGGR_CORE:
	case AGGR_DIE:
	case AGGR_SOCKET:
	case AGGR_NODE:
	case AGGR_NONE:
		if (!evsel->snapshot)
			evsel__compute_deltas(evsel, cpu, thread, count);
		perf_counts_values__scale(count, config->scale, NULL);
		if ((config->aggr_mode == AGGR_NONE) && (!evsel->percore)) {
			perf_stat__update_shadow_stats(evsel, count->val,
						       cpu, &rt_stat);
		}

		if (config->aggr_mode == AGGR_THREAD) {
			if (config->stats)
				perf_stat__update_shadow_stats(evsel,
					count->val, 0, &config->stats[thread]);
			else
				perf_stat__update_shadow_stats(evsel,
					count->val, 0, &rt_stat);
		}
		break;
	case AGGR_GLOBAL:
		aggr->val += count->val;
		aggr->ena += count->ena;
		aggr->run += count->run;
		break;
	case AGGR_UNSET:
	default:
		break;
	}

	return 0;
}

static int process_counter_maps(struct perf_stat_config *config,
				struct evsel *counter)
{
	int nthreads = perf_thread_map__nr(counter->core.threads);
	int ncpus = evsel__nr_cpus(counter);
	int cpu, thread;

	if (counter->core.system_wide)
		nthreads = 1;

	for (thread = 0; thread < nthreads; thread++) {
		for (cpu = 0; cpu < ncpus; cpu++) {
			if (process_counter_values(config, counter, cpu, thread,
						   perf_counts(counter->counts, cpu, thread)))
				return -1;
		}
	}

	return 0;
}

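/*
 * Process one counter for the current round: reset the aggregate, fold
 * in every (cpu, thread) value via process_counter_maps(), then for
 * AGGR_GLOBAL compute deltas, scale, and feed val/ena/run into
 * ps->res_stats and the shadow stats used at printout.
 */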
int perf_stat_process_counter(struct perf_stat_config *config,
			      struct evsel *counter)
{
	struct perf_counts_values *aggr = &counter->counts->aggr;
	struct perf_stat_evsel *ps = counter->stats;
	u64 *count = counter->counts->aggr.values;
	int i, ret;

	aggr->val = aggr->ena = aggr->run = 0;

	/*
	 * We calculate the counter's data every interval, and the
	 * display code shows the avg value from ps->res_stats. We
	 * need to zero the stats for interval mode; otherwise the
	 * running averages over all intervals would be shown for
	 * each interval.
	 */
	if (config->interval || config->summary) {
		for (i = 0; i < 3; i++)
			init_stats(&ps->res_stats[i]);
	}

	if (counter->per_pkg)
		zero_per_pkg(counter);

	ret = process_counter_maps(config, counter);
	if (ret)
		return ret;

	if (config->aggr_mode != AGGR_GLOBAL)
		return 0;

	if (!counter->snapshot)
		evsel__compute_deltas(counter, -1, -1, aggr);
	perf_counts_values__scale(aggr, config->scale, &counter->counts->scaled);

	for (i = 0; i < 3; i++)
		update_stats(&ps->res_stats[i], count[i]);

	if (verbose > 0) {
		fprintf(config->output, "%s: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
			evsel__name(counter), count[0], count[1], count[2]);
	}

	/*
	 * Save the full runtime - to allow normalization during printout:
	 */
	perf_stat__update_shadow_stats(counter, *count, 0, &rt_stat);

	return 0;
}

int perf_event__process_stat_event(struct perf_session *session,
				   union perf_event *event)
{
	struct perf_counts_values count;
	struct perf_record_stat *st = &event->stat;
	struct evsel *counter;

	count.val = st->val;
	count.ena = st->ena;
	count.run = st->run;

	counter = perf_evlist__id2evsel(session->evlist, st->id);
	if (!counter) {
		pr_err("Failed to resolve counter for stat event.\n");
		return -EINVAL;
	}

	*perf_counts(counter->counts, st->cpu, st->thread) = count;
	counter->supported = true;
	return 0;
}

size_t perf_event__fprintf_stat(union perf_event *event, FILE *fp)
{
	struct perf_record_stat *st = (struct perf_record_stat *)event;
	size_t ret;

	ret = fprintf(fp, "\n... id %" PRI_lu64 ", cpu %d, thread %d\n",
		      st->id, st->cpu, st->thread);
	ret += fprintf(fp, "... value %" PRI_lu64 ", enabled %" PRI_lu64 ", running %" PRI_lu64 "\n",
		       st->val, st->ena, st->run);

	return ret;
}

size_t perf_event__fprintf_stat_round(union perf_event *event, FILE *fp)
{
	struct perf_record_stat_round *rd = (struct perf_record_stat_round *)event;
	size_t ret;

	ret = fprintf(fp, "\n... time %" PRI_lu64 ", type %s\n", rd->time,
		      rd->type == PERF_STAT_ROUND_TYPE__FINAL ? "FINAL" : "INTERVAL");

	return ret;
}

size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp)
{
	struct perf_stat_config sc;
	size_t ret;

	perf_event__read_stat_config(&sc, &event->stat_config);

	ret = fprintf(fp, "\n");
	ret += fprintf(fp, "... aggr_mode %d\n", sc.aggr_mode);
	ret += fprintf(fp, "... scale     %d\n", sc.scale);
	ret += fprintf(fp, "... interval  %u\n", sc.interval);

	return ret;
}

int create_perf_stat_counter(struct evsel *evsel,
			     struct perf_stat_config *config,
			     struct target *target,
			     int cpu)
{
	struct perf_event_attr *attr = &evsel->core.attr;
	struct evsel *leader = evsel->leader;

	attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
			    PERF_FORMAT_TOTAL_TIME_RUNNING;

	/*
	 * The event is part of a non-trivial group. Enable group reads
	 * (for the leader) and ID retrieval for all members.
	 */
	if (leader->core.nr_members > 1)
		attr->read_format |= PERF_FORMAT_ID|PERF_FORMAT_GROUP;

	attr->inherit = !config->no_inherit;

	/*
	 * Some events get initialized with sample_(period/type) set,
	 * like tracepoints. Clear them for counting.
	 */
	attr->sample_period = 0;

	if (config->identifier)
		attr->sample_type = PERF_SAMPLE_IDENTIFIER;

	if (config->all_user) {
		attr->exclude_kernel = 1;
		attr->exclude_user   = 0;
	}

	if (config->all_kernel) {
		attr->exclude_kernel = 0;
		attr->exclude_user   = 1;
	}

	/*
	 * Disable all counters initially; they will be enabled either
	 * manually by us or by the kernel via enable_on_exec, set
	 * later.
	 */
	if (evsel__is_group_leader(evsel)) {
		attr->disabled = 1;

		/*
		 * In case of initial_delay, we enable the tracee's
		 * events manually.
		 */
		if (target__none(target) && !config->initial_delay)
			attr->enable_on_exec = 1;
	}

	if (target__has_cpu(target) && !target__has_per_thread(target))
		return evsel__open_per_cpu(evsel, evsel__cpus(evsel), cpu);

	return evsel__open_per_thread(evsel, evsel->core.threads);
}
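
/*
 * Rough usage sketch (simplified from how builtin-stat.c drives this
 * API; the evlist/config/target variables and the read step are the
 * caller's responsibility and are only illustrative here):
 *
 *	perf_evlist__alloc_stats(evlist, false);
 *	evlist__for_each_entry(evlist, counter)
 *		create_perf_stat_counter(counter, &config, &target, -1);
 *	// ... enable, run workload, read counters into counter->counts ...
 *	evlist__for_each_entry(evlist, counter)
 *		perf_stat_process_counter(&config, counter);
 *	perf_evlist__free_stats(evlist);
 */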