^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /* SPDX-License-Identifier: GPL-2.0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) #ifndef __PERF_ENV_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) #define __PERF_ENV_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) #include <linux/rbtree.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #include "rwsem.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) struct perf_cpu_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10)
/*
 * Per-CPU topology identifiers as read from sysfs
 * (socket/die/core ids). One entry per CPU in perf_env::cpu.
 */
struct cpu_topology_map {
	int	socket_id;	/* physical package id */
	int	die_id;		/* die id within the package */
	int	core_id;	/* core id within the die */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16)
/*
 * Description of one CPU cache level (e.g. L1d, L2), mirroring the
 * sysfs cache attributes. The char* members are heap-allocated and
 * released by cpu_cache_level__free().
 */
struct cpu_cache_level {
	u32	level;		/* cache level number (1, 2, 3, ...) */
	u32	line_size;	/* cache line size in bytes */
	u32	sets;		/* number of sets */
	u32	ways;		/* associativity */
	char	*type;		/* e.g. "Data", "Instruction", "Unified" */
	char	*size;		/* human-readable total size string */
	char	*map;		/* shared_cpu_map / list of CPUs sharing this cache */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26)
/*
 * Per-NUMA-node memory statistics plus the set of CPUs that belong
 * to the node.
 */
struct numa_node {
	u32		 node;		/* NUMA node id */
	u64		 mem_total;	/* total memory on the node (kB, per header data) */
	u64		 mem_free;	/* free memory on the node */
	struct perf_cpu_map	*map;	/* CPUs belonging to this node */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33)
/*
 * Physical memory layout of one node: which memory blocks
 * (of perf_env::memory_bsize bytes each) are present.
 */
struct memory_node {
	u64		 node;	/* node id */
	u64		 size;	/* number of entries the bitmap can hold */
	unsigned long	*set;	/* bitmap of present memory blocks */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39)
/*
 * perf_env - environment/feature data describing the machine and the
 * recording session. Populated either from the live system or from the
 * feature headers of a perf.data file (report mode). Field order is
 * load-bearing for code that serializes/compares this data; do not
 * reorder.
 */
struct perf_env {
	char			*hostname;	/* uname nodename */
	char			*os_release;	/* uname release */
	char			*version;	/* perf version string */
	char			*arch;		/* uname machine, see perf_env__arch()/raw_arch() */
	int			nr_cpus_online;
	int			nr_cpus_avail;	/* see perf_env__nr_cpus_avail() */
	char			*cpu_desc;	/* CPU model description */
	char			*cpuid;		/* see perf_env__read_cpuid() */
	unsigned long long	total_mem;
	unsigned int		msr_pmu_type;	/* PMU type number of the "msr" PMU */
	unsigned int		max_branches;	/* max branch stack depth; presumably from LBR caps — confirm in env.c */

	/* Counts for the variable-size data below. */
	int			nr_cmdline;
	int			nr_sibling_cores;
	int			nr_sibling_dies;
	int			nr_sibling_threads;
	int			nr_numa_nodes;
	int			nr_memory_nodes;
	int			nr_pmu_mappings;
	int			nr_groups;
	int			nr_cpu_pmu_caps;
	char			*cmdline;	/* recorded command line, see perf_env__set_cmdline() */
	const char		**cmdline_argv;	/* argv view of cmdline */
	char			*sibling_cores;	/* core sibling lists (string form) */
	char			*sibling_dies;
	char			*sibling_threads;
	char			*pmu_mappings;	/* PMU name <-> type mappings */
	char			*cpu_pmu_caps;	/* cpu PMU capability strings */
	struct cpu_topology_map	*cpu;		/* per-CPU topology, see perf_env__read_cpu_topology_map() */
	struct cpu_cache_level	*caches;
	int			 caches_cnt;
	u32			comp_ratio;	/* compression state, see enum perf_compress_type */
	u32			comp_ver;
	u32			comp_type;
	u32			comp_level;
	u32			comp_mmap_len;
	struct numa_node	*numa_nodes;
	struct memory_node	*memory_nodes;
	unsigned long long	 memory_bsize;	/* memory block size (granularity of memory_node::set) */

	/*
	 * bpf_info_lock protects bpf rbtrees. This is needed because the
	 * trees are accessed by different threads in perf-top
	 */
	struct {
		struct rw_semaphore	lock;
		struct rb_root		infos;		/* bpf_prog_info_node tree, keyed by prog id */
		u32			infos_cnt;
		struct rb_root		btfs;		/* btf_node tree, keyed by btf id */
		u32			btfs_cnt;
	} bpf_progs;

	/* same reason as above (for perf-top) */
	struct {
		struct rw_semaphore	lock;
		struct rb_root		tree;
	} cgroups;

	/* For fast cpu to numa node lookup via perf_env__numa_node */
	int			*numa_map;	/* numa_map[cpu] == node; built lazily — confirm in env.c */
	int			 nr_numa_map;

	/* For real clock time reference. */
	struct {
		u64	tod_ns;		/* wall-clock (time of day) reference, ns */
		u64	clockid_ns;	/* value of 'clockid' clock at the same instant, ns */
		u64	clockid_res_ns;	/* resolution of that clock */
		int	clockid;	/* clockid_t used for the session */
		/*
		 * enabled is valid for report mode, and is true if above
		 * values are set, it's set in process_clock_data
		 */
		bool	enabled;
	} clock;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116)
/*
 * Compression method recorded in perf_env::comp_type.
 * PERF_COMP_MAX is a sentinel (number of valid types), not a real type.
 */
enum perf_compress_type {
	PERF_COMP_NONE = 0,	/* data not compressed */
	PERF_COMP_ZSTD,		/* Zstandard-compressed records */
	PERF_COMP_MAX
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122)
struct bpf_prog_info_node;
struct btf_node;

/* Global environment describing the host perf runs on (defined in env.c). */
extern struct perf_env perf_env;

/* Release all heap data owned by @env; @env itself is not freed. */
void perf_env__exit(struct perf_env *env);

/* Store @argc/@argv as env->cmdline / env->cmdline_argv; returns 0 or negative on error. */
int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[]);

/* Read host values into @env (cpuid string, per-CPU topology); 0 on success. */
int perf_env__read_cpuid(struct perf_env *env);
int perf_env__read_cpu_topology_map(struct perf_env *env);

/* Free the strings owned by one cpu_cache_level entry. */
void cpu_cache_level__free(struct cpu_cache_level *cache);

/* Normalized arch name (e.g. "x86"); raw_arch returns the unmodified uname string. */
const char *perf_env__arch(struct perf_env *env);
const char *perf_env__raw_arch(struct perf_env *env);
int perf_env__nr_cpus_avail(struct perf_env *env);

void perf_env__init(struct perf_env *env);

/*
 * BPF prog-info / BTF bookkeeping. Lookups and inserts take
 * env->bpf_progs.lock internally — presumably; confirm in env.c.
 */
void perf_env__insert_bpf_prog_info(struct perf_env *env,
				    struct bpf_prog_info_node *info_node);
struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
							__u32 prog_id);
/* Returns false if a node with the same id was already present — TODO confirm. */
bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id);

/* Map @cpu to its NUMA node using env->numa_map; see comment at numa_map. */
int perf_env__numa_node(struct perf_env *env, int cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) #endif /* __PERF_ENV_H */