Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for Orange Pi 5 / 5B / 5 Plus boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2) /* Manage affinity to optimize IPIs inside the kernel perf API. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3) #define _GNU_SOURCE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4) #include <sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5) #include <stdlib.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6) #include <linux/bitmap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7) #include <linux/zalloc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8) #include "perf.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9) #include "cpumap.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include "affinity.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) 
/*
 * Size in BYTES of the CPU mask passed to sched_{get,set}affinity().
 * Derived from cpu__max_cpu(), rounded up to a whole byte.
 */
static int get_cpu_set_size(void)
{
	int bits = cpu__max_cpu() + 8 - 1;	/* round bit count up to a byte */

	/*
	 * sched_getaffinity doesn't like masks smaller than the kernel.
	 * Hopefully that's big enough.
	 */
	if (bits < 4096)
		bits = 4096;

	return bits / 8;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) int affinity__setup(struct affinity *a)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) 	int cpu_set_size = get_cpu_set_size();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) 	a->orig_cpus = bitmap_alloc(cpu_set_size * 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) 	if (!a->orig_cpus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) 	sched_getaffinity(0, cpu_set_size, (cpu_set_t *)a->orig_cpus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) 	a->sched_cpus = bitmap_alloc(cpu_set_size * 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) 	if (!a->sched_cpus) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) 		zfree(&a->orig_cpus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) 	bitmap_zero((unsigned long *)a->sched_cpus, cpu_set_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) 	a->changed = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43)  * perf_event_open does an IPI internally to the target CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44)  * It is more efficient to change perf's affinity to the target
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45)  * CPU and then set up all events on that CPU, so we amortize
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46)  * CPU communication.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) void affinity__set(struct affinity *a, int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) 	int cpu_set_size = get_cpu_set_size();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) 	if (cpu == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) 	a->changed = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) 	set_bit(cpu, a->sched_cpus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) 	 * We ignore errors because affinity is just an optimization.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) 	 * This could happen for example with isolated CPUs or cpusets.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) 	 * In this case the IPIs inside the kernel's perf API still work.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) 	sched_setaffinity(0, cpu_set_size, (cpu_set_t *)a->sched_cpus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) 	clear_bit(cpu, a->sched_cpus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) void affinity__cleanup(struct affinity *a)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) 	int cpu_set_size = get_cpu_set_size();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) 	if (a->changed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) 		sched_setaffinity(0, cpu_set_size, (cpu_set_t *)a->orig_cpus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) 	zfree(&a->sched_cpus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) 	zfree(&a->orig_cpus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) }