// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <unistd.h>
#include <stdlib.h>
#include <signal.h>
#include <sys/mman.h>
#include <linux/string.h>

#include "tests.h"
#include "util/debug.h"
#include "util/evsel.h"
#include "util/evlist.h"
#include "util/cpumap.h"
#include "util/mmap.h"
#include "util/thread_map.h"
#include <perf/evlist.h>
#include <perf/mmap.h>

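/* Iterations of the busy loop below; enough work to produce many samples. */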
#define NR_LOOPS 10000000

/*
 * This test opens the software clock events (cpu-clock, task-clock),
 * then checks that their frequency -> period conversion shows no
 * artifact of the period being forcefully set to 1.
 */
static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
{
	int i, err = -1;
	volatile int tmp = 0;
	u64 total_periods = 0;
	int nr_samples = 0;
	char sbuf[STRERR_BUFSIZE];
	union perf_event *event;
	struct evsel *evsel;
	struct evlist *evlist;
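	/*
	 * freq = 1 selects frequency mode: instead of using a fixed period,
	 * the kernel adjusts the sampling period to approach sample_freq
	 * samples per second.
	 */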
	struct perf_event_attr attr = {
		.type = PERF_TYPE_SOFTWARE,
		.config = clock_id,
		.sample_type = PERF_SAMPLE_PERIOD,
		.exclude_kernel = 1,
		.disabled = 1,
		.freq = 1,
	};
	struct perf_cpu_map *cpus;
	struct perf_thread_map *threads;
	struct mmap *md;

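	/* Request 500 samples/sec (must not exceed perf_event_max_sample_rate). */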
	attr.sample_freq = 500;

	evlist = evlist__new();
	if (evlist == NULL) {
		pr_debug("evlist__new\n");
		return -1;
	}

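	/* Create the software clock event and add it to the list. */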
	evsel = evsel__new(&attr);
	if (evsel == NULL) {
		pr_debug("evsel__new\n");
		goto out_delete_evlist;
	}
	evlist__add(evlist, evsel);

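	/*
	 * Use a dummy (any CPU) cpu map and monitor only this process, so
	 * the samples come from the busy loop below.
	 */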
	cpus = perf_cpu_map__dummy_new();
	threads = thread_map__new_by_tid(getpid());
	if (!cpus || !threads) {
		err = -ENOMEM;
		pr_debug("Not enough memory to create thread/cpu maps\n");
		goto out_free_maps;
	}

	perf_evlist__set_maps(&evlist->core, cpus, threads);

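	/*
	 * The evlist uses its own references to the maps from here on;
	 * clear the local pointers so the out_free_maps error path does
	 * not put maps the evlist may still use.
	 */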
	cpus = NULL;
	threads = NULL;

	if (evlist__open(evlist)) {
		const char *knob = "/proc/sys/kernel/perf_event_max_sample_rate";

		err = -errno;
		pr_debug("Couldn't open evlist: %s\nHint: check %s, using %" PRIu64 " in this test.\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)),
			 knob, (u64)attr.sample_freq);
		goto out_delete_evlist;
	}

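	/* Map a 128-page ring buffer to read the samples back. */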
	err = evlist__mmap(evlist, 128);
	if (err < 0) {
		pr_debug("failed to mmap event: %d (%s)\n", errno,
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	evlist__enable(evlist);

	/* Generate samples: busy-loop in user space (exclude_kernel is set). */
	for (i = 0; i < NR_LOOPS; i++)
		tmp++;

	evlist__disable(evlist);

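	/* Drain the ring buffer, summing the period of every sample. */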
	md = &evlist->mmap[0];
	if (perf_mmap__read_init(&md->core) < 0)
		goto out_init;

	while ((event = perf_mmap__read_event(&md->core)) != NULL) {
		struct perf_sample sample;

		if (event->header.type != PERF_RECORD_SAMPLE)
			goto next_event;

		err = perf_evlist__parse_sample(evlist, event, &sample);
		if (err < 0) {
			pr_debug("Failed to parse sample\n");
			goto out_delete_evlist;
		}

		total_periods += sample.period;
		nr_samples++;
next_event:
		perf_mmap__consume(&md->core);
	}
	perf_mmap__read_done(&md->core);

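	/*
	 * If the sum of the periods equals the number of samples, every
	 * sample carried a period of 1, i.e. the frequency -> period
	 * conversion never took effect: fail.
	 */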
out_init:
	if ((u64) nr_samples == total_periods) {
		pr_debug("All (%d) samples have period value of 1!\n",
			 nr_samples);
		err = -1;
	}

out_free_maps:
	perf_cpu_map__put(cpus);
	perf_thread_map__put(threads);
out_delete_evlist:
	evlist__delete(evlist);
	return err;
}

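/* Run the check for both software clocks; stop at the first failure. */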
int test__sw_clock_freq(struct test *test __maybe_unused, int subtest __maybe_unused)
{
	int ret;

	ret = __test__sw_clock_freq(PERF_COUNT_SW_CPU_CLOCK);
	if (!ret)
		ret = __test__sw_clock_freq(PERF_COUNT_SW_TASK_CLOCK);

	return ret;
}