// SPDX-License-Identifier: GPL-2.0
#include "cpumap.h"
#include "debug.h"
#include "env.h"
#include "util/header.h"
#include <linux/ctype.h>
#include <linux/zalloc.h>
#include "bpf-event.h"
#include "cgroup.h"
#include <errno.h>
#include <sys/utsname.h>
#include <bpf/libbpf.h>
#include <stdlib.h>
#include <string.h>

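/* Global perf_env, filled in from the running system and/or a perf.data header. */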
struct perf_env perf_env;

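/*
 * Insert info_node into the env->bpf_progs.infos rb-tree, keyed by BPF
 * program id. A duplicate id is only reported with pr_debug() and the node
 * is left out of the tree, so in that case the caller still owns it.
 *
 * Minimal usage sketch (hypothetical caller, names for exposition only):
 *
 *	info_node = malloc(sizeof(*info_node));
 *	if (info_node) {
 *		info_node->info_linear = info_linear;
 *		perf_env__insert_bpf_prog_info(env, info_node);
 *	}
 */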
void perf_env__insert_bpf_prog_info(struct perf_env *env,
				    struct bpf_prog_info_node *info_node)
{
	__u32 prog_id = info_node->info_linear->info.id;
	struct bpf_prog_info_node *node;
	struct rb_node *parent = NULL;
	struct rb_node **p;

	down_write(&env->bpf_progs.lock);
	p = &env->bpf_progs.infos.rb_node;

	while (*p != NULL) {
		parent = *p;
		node = rb_entry(parent, struct bpf_prog_info_node, rb_node);
		if (prog_id < node->info_linear->info.id) {
			p = &(*p)->rb_left;
		} else if (prog_id > node->info_linear->info.id) {
			p = &(*p)->rb_right;
		} else {
			pr_debug("duplicated bpf prog info %u\n", prog_id);
			goto out;
		}
	}

	rb_link_node(&info_node->rb_node, parent, p);
	rb_insert_color(&info_node->rb_node, &env->bpf_progs.infos);
	env->bpf_progs.infos_cnt++;
out:
	up_write(&env->bpf_progs.lock);
}

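/*
 * Look up a previously inserted BPF program info by id. Returns NULL when
 * the id is not in the tree; otherwise the returned node is still owned by
 * env and must not be freed by the caller.
 */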
struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
							 __u32 prog_id)
{
	struct bpf_prog_info_node *node = NULL;
	struct rb_node *n;

	down_read(&env->bpf_progs.lock);
	n = env->bpf_progs.infos.rb_node;

	while (n) {
		node = rb_entry(n, struct bpf_prog_info_node, rb_node);
		if (prog_id < node->info_linear->info.id)
			n = n->rb_left;
		else if (prog_id > node->info_linear->info.id)
			n = n->rb_right;
		else
			goto out;
	}
	node = NULL;

out:
	up_read(&env->bpf_progs.lock);
	return node;
}

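/*
 * Insert btf_node into the env->bpf_progs.btfs rb-tree, keyed by BTF id.
 * Returns false, without taking ownership, if a node with the same id is
 * already present, so the caller can free its copy.
 */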
bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
{
	struct rb_node *parent = NULL;
	__u32 btf_id = btf_node->id;
	struct btf_node *node;
	struct rb_node **p;
	bool ret = true;

	down_write(&env->bpf_progs.lock);
	p = &env->bpf_progs.btfs.rb_node;

	while (*p != NULL) {
		parent = *p;
		node = rb_entry(parent, struct btf_node, rb_node);
		if (btf_id < node->id) {
			p = &(*p)->rb_left;
		} else if (btf_id > node->id) {
			p = &(*p)->rb_right;
		} else {
			pr_debug("duplicated btf %u\n", btf_id);
			ret = false;
			goto out;
		}
	}

	rb_link_node(&btf_node->rb_node, parent, p);
	rb_insert_color(&btf_node->rb_node, &env->bpf_progs.btfs);
	env->bpf_progs.btfs_cnt++;
out:
	up_write(&env->bpf_progs.lock);
	return ret;
}

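/*
 * Look up a BTF object by id. Returns NULL when the id is unknown;
 * otherwise the returned node remains owned by env.
 */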
struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
{
	struct btf_node *node = NULL;
	struct rb_node *n;

	down_read(&env->bpf_progs.lock);
	n = env->bpf_progs.btfs.rb_node;

	while (n) {
		node = rb_entry(n, struct btf_node, rb_node);
		if (btf_id < node->id)
			n = n->rb_left;
		else if (btf_id > node->id)
			n = n->rb_right;
		else
			goto out;
	}
	node = NULL;

out:
	up_read(&env->bpf_progs.lock);
	return node;
}

/* purge data in the bpf_progs.infos and bpf_progs.btfs trees */
static void perf_env__purge_bpf(struct perf_env *env)
{
	struct rb_root *root;
	struct rb_node *next;

	down_write(&env->bpf_progs.lock);

	root = &env->bpf_progs.infos;
	next = rb_first(root);

	while (next) {
		struct bpf_prog_info_node *node;

		node = rb_entry(next, struct bpf_prog_info_node, rb_node);
		next = rb_next(&node->rb_node);
		rb_erase(&node->rb_node, root);
		free(node->info_linear);
		free(node);
	}

	env->bpf_progs.infos_cnt = 0;

	root = &env->bpf_progs.btfs;
	next = rb_first(root);

	while (next) {
		struct btf_node *node;

		node = rb_entry(next, struct btf_node, rb_node);
		next = rb_next(&node->rb_node);
		rb_erase(&node->rb_node, root);
		free(node);
	}

	env->bpf_progs.btfs_cnt = 0;

	up_write(&env->bpf_progs.lock);
}

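/*
 * Release everything owned by env: the BPF program info and BTF trees, the
 * cgroup tree, and the dynamically allocated strings and arrays describing
 * the recorded machine.
 */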
void perf_env__exit(struct perf_env *env)
{
	int i;

	perf_env__purge_bpf(env);
	perf_env__purge_cgroups(env);
	zfree(&env->hostname);
	zfree(&env->os_release);
	zfree(&env->version);
	zfree(&env->arch);
	zfree(&env->cpu_desc);
	zfree(&env->cpuid);
	zfree(&env->cmdline);
	zfree(&env->cmdline_argv);
	zfree(&env->sibling_dies);
	zfree(&env->sibling_cores);
	zfree(&env->sibling_threads);
	zfree(&env->pmu_mappings);
	zfree(&env->cpu);
	zfree(&env->cpu_pmu_caps);
	zfree(&env->numa_map);

	for (i = 0; i < env->nr_numa_nodes; i++)
		perf_cpu_map__put(env->numa_nodes[i].map);
	zfree(&env->numa_nodes);

	for (i = 0; i < env->caches_cnt; i++)
		cpu_cache_level__free(&env->caches[i]);
	zfree(&env->caches);

	for (i = 0; i < env->nr_memory_nodes; i++)
		zfree(&env->memory_nodes[i].set);
	zfree(&env->memory_nodes);
}

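/* Set up the rb-trees and the lock protecting them; other fields are left untouched. */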
void perf_env__init(struct perf_env *env)
{
	env->bpf_progs.infos = RB_ROOT;
	env->bpf_progs.btfs = RB_ROOT;
	init_rwsem(&env->bpf_progs.lock);
}

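/*
 * Record the command line that started the tool. Only the pointer array is
 * duplicated; the argv[] strings themselves are referenced directly, which
 * is why perf_env__exit() frees just the array. Returns 0 or -ENOMEM.
 */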
int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[])
{
	int i;

	/* do not include NULL termination */
	env->cmdline_argv = calloc(argc, sizeof(char *));
	if (env->cmdline_argv == NULL)
		goto out_enomem;

	/*
	 * Must copy argv contents because it gets moved around during option
	 * parsing:
	 */
	for (i = 0; i < argc; i++) {
		env->cmdline_argv[i] = argv[i];
		if (env->cmdline_argv[i] == NULL)
			goto out_free;
	}

	env->nr_cmdline = argc;

	return 0;
out_free:
	zfree(&env->cmdline_argv);
out_enomem:
	return -ENOMEM;
}

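/*
 * Lazily build the per-CPU topology array (core, die and socket ids) for
 * all present CPUs using the sysfs backed cpu_map helpers. Subsequent calls
 * are no-ops once env->cpu is populated. Returns 0 or a negative errno.
 */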
int perf_env__read_cpu_topology_map(struct perf_env *env)
{
	int cpu, nr_cpus;

	if (env->cpu != NULL)
		return 0;

	if (env->nr_cpus_avail == 0)
		env->nr_cpus_avail = cpu__max_present_cpu();

	nr_cpus = env->nr_cpus_avail;
	if (nr_cpus == -1)
		return -EINVAL;

	env->cpu = calloc(nr_cpus, sizeof(env->cpu[0]));
	if (env->cpu == NULL)
		return -ENOMEM;

	for (cpu = 0; cpu < nr_cpus; ++cpu) {
		env->cpu[cpu].core_id = cpu_map__get_core_id(cpu);
		env->cpu[cpu].socket_id = cpu_map__get_socket_id(cpu);
		env->cpu[cpu].die_id = cpu_map__get_die_id(cpu);
	}

	env->nr_cpus_avail = nr_cpus;
	return 0;
}

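/*
 * Refresh env->cpuid with the value reported by the arch specific
 * get_cpuid() helper, replacing any previously stored string.
 */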
int perf_env__read_cpuid(struct perf_env *env)
{
	char cpuid[128];
	int err = get_cpuid(cpuid, sizeof(cpuid));

	if (err)
		return err;

	free(env->cpuid);
	env->cpuid = strdup(cpuid);
	if (env->cpuid == NULL)
		return ENOMEM;
	return 0;
}

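/* Fill env->arch from uname(2) unless it was already set, e.g. from a perf.data header. */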
static int perf_env__read_arch(struct perf_env *env)
{
	struct utsname uts;

	if (env->arch)
		return 0;

	if (!uname(&uts))
		env->arch = strdup(uts.machine);

	return env->arch ? 0 : -ENOMEM;
}

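/* Fill env->nr_cpus_avail from the highest present CPU if it is still unknown. */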
static int perf_env__read_nr_cpus_avail(struct perf_env *env)
{
	if (env->nr_cpus_avail == 0)
		env->nr_cpus_avail = cpu__max_present_cpu();

	return env->nr_cpus_avail ? 0 : -ENOENT;
}

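/* Architecture name exactly as reported by uname(2), e.g. "x86_64", or "unknown". */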
const char *perf_env__raw_arch(struct perf_env *env)
{
	return env && !perf_env__read_arch(env) ? env->arch : "unknown";
}

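/* Number of available CPUs, or 0 if it cannot be determined. */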
int perf_env__nr_cpus_avail(struct perf_env *env)
{
	return env && !perf_env__read_nr_cpus_avail(env) ? env->nr_cpus_avail : 0;
}

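/* Free the strings held by a cpu_cache_level entry, not the entry itself. */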
void cpu_cache_level__free(struct cpu_cache_level *cache)
{
	zfree(&cache->type);
	zfree(&cache->map);
	zfree(&cache->size);
}

/*
 * Return architecture name in a normalized form.
 * The conversion logic comes from the Makefile.
 */
static const char *normalize_arch(char *arch)
{
	if (!strcmp(arch, "x86_64"))
		return "x86";
	if (arch[0] == 'i' && arch[2] == '8' && arch[3] == '6')
		return "x86";
	if (!strcmp(arch, "sun4u") || !strncmp(arch, "sparc", 5))
		return "sparc";
	if (!strcmp(arch, "aarch64") || !strcmp(arch, "arm64"))
		return "arm64";
	if (!strncmp(arch, "arm", 3) || !strcmp(arch, "sa110"))
		return "arm";
	if (!strncmp(arch, "s390", 4))
		return "s390";
	if (!strncmp(arch, "parisc", 6))
		return "parisc";
	if (!strncmp(arch, "powerpc", 7) || !strncmp(arch, "ppc", 3))
		return "powerpc";
	if (!strncmp(arch, "mips", 4))
		return "mips";
	if (!strncmp(arch, "sh", 2) && isdigit(arch[2]))
		return "sh";

	return arch;
}

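/*
 * Normalized architecture name ("x86", "arm64", ...) for env, falling back
 * to the machine perf itself runs on when env carries no arch (live mode).
 *
 * Illustrative use (hypothetical helper, for exposition only):
 *
 *	if (!strcmp(perf_env__arch(env), "x86"))
 *		use_x86_specific_unwind();
 */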
const char *perf_env__arch(struct perf_env *env)
{
	char *arch_name;

	if (!env || !env->arch) { /* Assume local operation */
		static struct utsname uts = { .machine[0] = '\0', };
		if (uts.machine[0] == '\0' && uname(&uts) < 0)
			return NULL;
		arch_name = uts.machine;
	} else
		arch_name = env->arch;

	return normalize_arch(arch_name);
}

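/*
 * Map a CPU number to its NUMA node. On first use this builds env->numa_map,
 * an array indexed by CPU and sized to the highest CPU found in any node's
 * cpu map; CPUs that belong to no recorded node, and out of range CPU
 * numbers, resolve to -1.
 *
 * Illustrative use (hypothetical caller, for exposition only):
 *
 *	int node = perf_env__numa_node(env, sample->cpu);
 *
 *	if (node >= 0)
 *		node_samples[node]++;
 */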
int perf_env__numa_node(struct perf_env *env, int cpu)
{
	if (!env->nr_numa_map) {
		struct numa_node *nn;
		int i, nr = 0;

		for (i = 0; i < env->nr_numa_nodes; i++) {
			nn = &env->numa_nodes[i];
			nr = max(nr, perf_cpu_map__max(nn->map));
		}

		nr++;

		/*
		 * Initialize the whole numa_map so that lookups for CPUs
		 * missing from every node's map return node -1.
		 */
		env->numa_map = malloc(nr * sizeof(int));
		if (!env->numa_map)
			return -1;

		for (i = 0; i < nr; i++)
			env->numa_map[i] = -1;

		env->nr_numa_map = nr;

		for (i = 0; i < env->nr_numa_nodes; i++) {
			int tmp, j;

			nn = &env->numa_nodes[i];
			perf_cpu_map__for_each_cpu(j, tmp, nn->map)
				env->numa_map[j] = i;
		}
	}

	return cpu >= 0 && cpu < env->nr_numa_map ? env->numa_map[cpu] : -1;
}