Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

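The file below is perf's "openat syscall event on all cpus" self-test (tools/perf/tests/openat-syscall-all-cpus.c in the kernel tree): it pins itself to each CPU in turn, issues a known number of openat(2) calls per CPU, and verifies the syscalls:sys_enter_openat tracepoint count observed on every CPU.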
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
/* For the CPU_* macros */
#include <pthread.h>

#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <api/fs/fs.h>
#include <linux/err.h>
#include <linux/string.h>
#include <api/fs/tracing_path.h>
#include "evsel.h"
#include "tests.h"
#include "thread_map.h"
#include <perf/cpumap.h>
#include <internal/cpumap.h>
#include "debug.h"
#include "stat.h"
#include "util/counts.h"

int test__openat_syscall_event_on_all_cpus(struct test *test __maybe_unused, int subtest __maybe_unused)
{
	int err = -1, fd, cpu;
	struct perf_cpu_map *cpus;
	struct evsel *evsel;
	unsigned int nr_openat_calls = 111, i;
	cpu_set_t cpu_set;
	struct perf_thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX);
	char sbuf[STRERR_BUFSIZE];
	char errbuf[BUFSIZ];

	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	cpus = perf_cpu_map__new(NULL);
	if (cpus == NULL) {
		pr_debug("perf_cpu_map__new\n");
		goto out_thread_map_delete;
	}

	CPU_ZERO(&cpu_set);

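	/*
	 * Create an event for the syscalls:sys_enter_openat tracepoint;
	 * evsel__newtp() returns an ERR_PTR() on failure, hence IS_ERR().
	 */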
	evsel = evsel__newtp("syscalls", "sys_enter_openat");
	if (IS_ERR(evsel)) {
		tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "syscalls", "sys_enter_openat");
		pr_debug("%s\n", errbuf);
		goto out_cpu_map_delete;
	}

	if (evsel__open(evsel, cpus, threads) < 0) {
		pr_debug("failed to open counter: %s, "
			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_evsel_delete;
	}

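	/*
	 * Pin the thread to each CPU in turn and issue a per-CPU
	 * distinct number of openat() calls (nr_openat_calls + cpu),
	 * so the verification pass below can tell the CPUs apart.
	 */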
	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int ncalls = nr_openat_calls + cpu;
		/*
		 * XXX eventually lift this restriction in a way that
		 * keeps perf building on older glibc installations
		 * without CPU_ALLOC. 1024 cpus in 2010 still seems
		 * a reasonable upper limit tho :-)
		 */
		if (cpus->map[cpu] >= CPU_SETSIZE) {
			pr_debug("Ignoring CPU %d\n", cpus->map[cpu]);
			continue;
		}

		CPU_SET(cpus->map[cpu], &cpu_set);
		if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
			pr_debug("sched_setaffinity() failed on CPU %d: %s ",
				 cpus->map[cpu],
				 str_error_r(errno, sbuf, sizeof(sbuf)));
			goto out_close_fd;
		}
		for (i = 0; i < ncalls; ++i) {
			fd = openat(0, "/etc/passwd", O_RDONLY);
			close(fd);
		}
		CPU_CLR(cpus->map[cpu], &cpu_set);
	}

	/*
	 * Here we need to explicitly preallocate the counts: with the
	 * auto allocation they would be allocated for just one CPU,
	 * since the reads below start with cpu 0.
	 */
	if (evsel__alloc_counts(evsel, cpus->nr, 1) < 0) {
		pr_debug("evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
		goto out_close_fd;
	}

	err = 0;

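	/*
	 * Read the counter on each CPU and check it saw exactly the
	 * number of calls issued while the thread was pinned there.
	 */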
	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int expected;

		if (cpus->map[cpu] >= CPU_SETSIZE)
			continue;

		if (evsel__read_on_cpu(evsel, cpu, 0) < 0) {
			pr_debug("evsel__read_on_cpu\n");
			err = -1;
			break;
		}

		expected = nr_openat_calls + cpu;
		if (perf_counts(evsel->counts, cpu, 0)->val != expected) {
			pr_debug("evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n",
				 expected, cpus->map[cpu], perf_counts(evsel->counts, cpu, 0)->val);
			err = -1;
		}
	}

	evsel__free_counts(evsel);
out_close_fd:
	perf_evsel__close_fd(&evsel->core);
out_evsel_delete:
	evsel__delete(evsel);
out_cpu_map_delete:
	perf_cpu_map__put(cpus);
out_thread_map_delete:
	perf_thread_map__put(threads);
	return err;
}
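The test is run through perf's built-in test runner. With a perf binary built from this tree, something like the following should select it (the exact test name and number vary between perf versions):

    perf test -v openat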