// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016 Facebook
 */
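/*
 * User-space loader for a perf-event sampling BPF program: it opens
 * sampling perf events (hardware, software, HW-cache and raw types),
 * attaches the program from "<argv0>_kern.o" to them, generates read/write
 * syscall load with dd, and dumps the sampled kernel/user stack traces
 * collected in the "counts" and "stackmap" maps in a folded,
 * one-line-per-stack format.
 */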
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <linux/perf_event.h>
#include <linux/bpf.h>
#include <signal.h>
#include <errno.h>
#include <sys/resource.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "perf-sys.h"
#include "trace_helpers.h"

#define SAMPLE_FREQ 50

static int pid;
/* counts, stackmap */
static int map_fd[2];
static struct bpf_program *prog;
static bool sys_read_seen, sys_write_seen;

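/* Resolve a kernel address via kallsyms and print the symbol name; also
 * record whether a sys_read()/sys_write() frame has been seen so the test
 * can later verify the sampled kernel stacks are plausible.
 */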
static void print_ksym(__u64 addr)
{
	struct ksym *sym;

	if (!addr)
		return;
	sym = ksym_search(addr);
	if (!sym) {
		printf("ksym not found. Is kallsyms loaded?\n");
		return;
	}

	printf("%s;", sym->name);
	if (strstr(sym->name, "sys_read"))
		sys_read_seen = true;
	else if (strstr(sym->name, "sys_write"))
		sys_write_seen = true;
}

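/* Print a raw user-space address; user stacks are not symbolized here. */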
static void print_addr(__u64 addr)
{
	if (!addr)
		return;
	printf("%llx;", addr);
}

#define TASK_COMM_LEN 16

struct key_t {
	char comm[TASK_COMM_LEN];
	__u32 kernstack;
	__u32 userstack;
};

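/* Print one folded stack line: sample count, task comm, the kernel stack
 * (symbolized) and the user stack (raw addresses), both looked up in the
 * stackmap by the ids stored in the key. Warn once if stackmap collisions
 * (-EEXIST) are seen.
 */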
static void print_stack(struct key_t *key, __u64 count)
{
	__u64 ip[PERF_MAX_STACK_DEPTH] = {};
	static bool warned;
	int i;

	printf("%3lld %s;", count, key->comm);
	if (bpf_map_lookup_elem(map_fd[1], &key->kernstack, ip) != 0) {
		printf("---;");
	} else {
		for (i = PERF_MAX_STACK_DEPTH - 1; i >= 0; i--)
			print_ksym(ip[i]);
	}
	printf("-;");
	if (bpf_map_lookup_elem(map_fd[1], &key->userstack, ip) != 0) {
		printf("---;");
	} else {
		for (i = PERF_MAX_STACK_DEPTH - 1; i >= 0; i--)
			print_addr(ip[i]);
	}
	if (count < 6)
		printf("\r");
	else
		printf("\n");

	if (key->kernstack == -EEXIST && !warned) {
		printf("stackmap collisions seen. Consider increasing size\n");
		warned = true;
	} else if ((int)key->kernstack < 0 && (int)key->userstack < 0) {
		printf("err stackid %d %d\n", key->kernstack, key->userstack);
	}
}

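/* Kill the trace_pipe reader child (if one was forked) and exit. Also
 * installed as the SIGINT/SIGTERM handler.
 */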
static void err_exit(int err)
{
	if (pid > 0)
		kill(pid, SIGKILL);
	exit(err);
}

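/* Walk the "counts" map, printing and deleting every sampled stack, verify
 * that sys_read()/sys_write() frames showed up, then flush the stackmap so
 * the next test starts clean.
 */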
static void print_stacks(void)
{
	struct key_t key = {}, next_key;
	__u64 value;
	__u32 stackid = 0, next_id;
	int error = 1, fd = map_fd[0], stack_map = map_fd[1];

	sys_read_seen = sys_write_seen = false;
	while (bpf_map_get_next_key(fd, &key, &next_key) == 0) {
		bpf_map_lookup_elem(fd, &next_key, &value);
		print_stack(&next_key, value);
		bpf_map_delete_elem(fd, &next_key);
		key = next_key;
	}
	printf("\n");
	if (!sys_read_seen || !sys_write_seen) {
		printf("BUG kernel stack doesn't contain sys_read() and sys_write()\n");
		err_exit(error);
	}

	/* clear stack map */
	while (bpf_map_get_next_key(stack_map, &stackid, &next_id) == 0) {
		bpf_map_delete_elem(stack_map, &next_id);
		stackid = next_id;
	}
}

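/* Generate read()/write() syscall activity with dd so the sampled stacks
 * contain sys_read()/sys_write() frames.
 */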
static inline int generate_load(void)
{
	if (system("dd if=/dev/zero of=/dev/null count=5000k status=none") < 0) {
		printf("failed to generate some load with dd: %s\n", strerror(errno));
		return -1;
	}

	return 0;
}

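/* Open a system-wide sampling perf event on every online CPU, attach the
 * BPF program to each, generate load and dump the collected stacks.
 */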
static void test_perf_event_all_cpu(struct perf_event_attr *attr)
{
	int nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
	struct bpf_link **links = calloc(nr_cpus, sizeof(struct bpf_link *));
	int i, pmu_fd, error = 1;

	if (!links) {
		printf("calloc of links failed\n");
		goto err;
	}

	/* system wide perf event, no need to inherit */
	attr->inherit = 0;

	/* open perf_event on all cpus */
	for (i = 0; i < nr_cpus; i++) {
		pmu_fd = sys_perf_event_open(attr, -1, i, -1, 0);
		if (pmu_fd < 0) {
			printf("sys_perf_event_open failed\n");
			goto all_cpu_err;
		}
		links[i] = bpf_program__attach_perf_event(prog, pmu_fd);
		if (libbpf_get_error(links[i])) {
			printf("bpf_program__attach_perf_event failed\n");
			links[i] = NULL;
			close(pmu_fd);
			goto all_cpu_err;
		}
	}

	if (generate_load() < 0)
		goto all_cpu_err;

	print_stacks();
	error = 0;
all_cpu_err:
	for (i--; i >= 0; i--)
		bpf_link__destroy(links[i]);
err:
	free(links);
	if (error)
		err_exit(error);
}

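/* Open a single task-bound sampling perf event (current task, any CPU)
 * with inheritance enabled, attach the BPF program, generate load and dump
 * the collected stacks.
 */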
static void test_perf_event_task(struct perf_event_attr *attr)
{
	struct bpf_link *link = NULL;
	int pmu_fd, error = 1;

	/* per task perf event, enable inherit so the "dd ..." command can be
	 * traced properly. Enabling inherit will cause bpf_perf_prog_read_time
	 * helper failure.
	 */
	attr->inherit = 1;

	/* open task bound event */
	pmu_fd = sys_perf_event_open(attr, 0, -1, -1, 0);
	if (pmu_fd < 0) {
		printf("sys_perf_event_open failed\n");
		goto err;
	}
	link = bpf_program__attach_perf_event(prog, pmu_fd);
	if (libbpf_get_error(link)) {
		printf("bpf_program__attach_perf_event failed\n");
		link = NULL;
		close(pmu_fd);
		goto err;
	}

	if (generate_load() < 0)
		goto err;

	print_stacks();
	error = 0;
err:
	bpf_link__destroy(link);
	if (error)
		err_exit(error);
}

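/* Exercise the sampling program against a range of perf event types:
 * hardware cycles, software cpu-clock, HW cache (L1D loads, branch misses)
 * and raw Intel PMU events, each both system-wide and task-bound.
 */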
static void test_bpf_perf_event(void)
{
	struct perf_event_attr attr_type_hw = {
		.sample_freq = SAMPLE_FREQ,
		.freq = 1,
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_event_attr attr_type_sw = {
		.sample_freq = SAMPLE_FREQ,
		.freq = 1,
		.type = PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_CPU_CLOCK,
	};
	struct perf_event_attr attr_hw_cache_l1d = {
		.sample_freq = SAMPLE_FREQ,
		.freq = 1,
		.type = PERF_TYPE_HW_CACHE,
		.config =
			PERF_COUNT_HW_CACHE_L1D |
			(PERF_COUNT_HW_CACHE_OP_READ << 8) |
			(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16),
	};
	struct perf_event_attr attr_hw_cache_branch_miss = {
		.sample_freq = SAMPLE_FREQ,
		.freq = 1,
		.type = PERF_TYPE_HW_CACHE,
		.config =
			PERF_COUNT_HW_CACHE_BPU |
			(PERF_COUNT_HW_CACHE_OP_READ << 8) |
			(PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
	};
	struct perf_event_attr attr_type_raw = {
		.sample_freq = SAMPLE_FREQ,
		.freq = 1,
		.type = PERF_TYPE_RAW,
		/* Intel Instruction Retired */
		.config = 0xc0,
	};
	struct perf_event_attr attr_type_raw_lock_load = {
		.sample_freq = SAMPLE_FREQ,
		.freq = 1,
		.type = PERF_TYPE_RAW,
		/* Intel MEM_UOPS_RETIRED.LOCK_LOADS */
		.config = 0x21d0,
		/* Request to record lock address from PEBS */
		.sample_type = PERF_SAMPLE_ADDR,
		/* Record address value requires precise event */
		.precise_ip = 2,
	};

	printf("Test HW_CPU_CYCLES\n");
	test_perf_event_all_cpu(&attr_type_hw);
	test_perf_event_task(&attr_type_hw);

	printf("Test SW_CPU_CLOCK\n");
	test_perf_event_all_cpu(&attr_type_sw);
	test_perf_event_task(&attr_type_sw);

	printf("Test HW_CACHE_L1D\n");
	test_perf_event_all_cpu(&attr_hw_cache_l1d);
	test_perf_event_task(&attr_hw_cache_l1d);

	printf("Test HW_CACHE_BPU\n");
	test_perf_event_all_cpu(&attr_hw_cache_branch_miss);
	test_perf_event_task(&attr_hw_cache_branch_miss);

	printf("Test Instruction Retired\n");
	test_perf_event_all_cpu(&attr_type_raw);
	test_perf_event_task(&attr_type_raw);

	printf("Test Lock Load\n");
	test_perf_event_all_cpu(&attr_type_raw_lock_load);
	test_perf_event_task(&attr_type_raw_lock_load);

	printf("*** PASS ***\n");
}

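/* Load kallsyms, open and load the BPF object built from this sample, look
 * up its maps, fork a child that dumps the kernel trace pipe, and run the
 * perf event tests. All exits go through err_exit() so the child is killed.
 */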
int main(int argc, char **argv)
{
	struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
	struct bpf_object *obj = NULL;
	char filename[256];
	int error = 1;

	snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
	setrlimit(RLIMIT_MEMLOCK, &r);

	signal(SIGINT, err_exit);
	signal(SIGTERM, err_exit);

	if (load_kallsyms()) {
		printf("failed to process /proc/kallsyms\n");
		goto cleanup;
	}

	obj = bpf_object__open_file(filename, NULL);
	if (libbpf_get_error(obj)) {
		printf("opening BPF object file failed\n");
		obj = NULL;
		goto cleanup;
	}

	prog = bpf_object__find_program_by_name(obj, "bpf_prog1");
	if (!prog) {
		printf("finding a prog in obj file failed\n");
		goto cleanup;
	}

	/* load BPF program */
	if (bpf_object__load(obj)) {
		printf("loading BPF object file failed\n");
		goto cleanup;
	}

	map_fd[0] = bpf_object__find_map_fd_by_name(obj, "counts");
	map_fd[1] = bpf_object__find_map_fd_by_name(obj, "stackmap");
	if (map_fd[0] < 0 || map_fd[1] < 0) {
		printf("finding a counts/stackmap map in obj file failed\n");
		goto cleanup;
	}

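	/* child: keep dumping the kernel trace pipe while the parent runs
	 * the perf event tests
	 */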
	pid = fork();
	if (pid == 0) {
		read_trace_pipe();
		return 0;
	} else if (pid == -1) {
		printf("couldn't spawn process\n");
		goto cleanup;
	}

	test_bpf_perf_event();
	error = 0;

cleanup:
	bpf_object__close(obj);
	err_exit(error);
}