// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include "string2.h"
#include <sys/param.h>
#include <sys/types.h>
#include <byteswap.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/string.h>
#include <linux/stringify.h>
#include <linux/zalloc.h>
#include <sys/stat.h>
#include <sys/utsname.h>
#include <linux/time64.h>
#include <dirent.h>
#include <bpf/libbpf.h>
#include <perf/cpumap.h>

#include "dso.h"
#include "evlist.h"
#include "evsel.h"
#include "util/evsel_fprintf.h"
#include "header.h"
#include "memswap.h"
#include "trace-event.h"
#include "session.h"
#include "symbol.h"
#include "debug.h"
#include "cpumap.h"
#include "pmu.h"
#include "vdso.h"
#include "strbuf.h"
#include "build-id.h"
#include "data.h"
#include <api/fs/fs.h>
#include "asm/bug.h"
#include "tool.h"
#include "time-utils.h"
#include "units.h"
#include "util/util.h" // perf_exe()
#include "cputopo.h"
#include "bpf-event.h"
#include "clockid.h"

#include <linux/ctype.h>
#include <internal/lib.h>

/*
 * magic2 = "PERFILE2"
 * It must be a numerical value so that the endianness
 * determines the memory layout. That way we are able
 * to detect the endianness when reading the perf.data
 * file back.
 *
 * We also check for the legacy (PERFFILE) format.
 */
static const char *__perf_magic1 = "PERFFILE";
static const u64 __perf_magic2    = 0x32454c4946524550ULL;
static const u64 __perf_magic2_sw = 0x50455246494c4532ULL;

#define PERF_MAGIC	__perf_magic2

const char perf_version_string[] = PERF_VERSION;

struct perf_file_attr {
	struct perf_event_attr	attr;
	struct perf_file_section	ids;
};

void perf_header__set_feat(struct perf_header *header, int feat)
{
	set_bit(feat, header->adds_features);
}

void perf_header__clear_feat(struct perf_header *header, int feat)
{
	clear_bit(feat, header->adds_features);
}

bool perf_header__has_feat(const struct perf_header *header, int feat)
{
	return test_bit(feat, header->adds_features);
}

static int __do_write_fd(struct feat_fd *ff, const void *buf, size_t size)
{
	ssize_t ret = writen(ff->fd, buf, size);

	if (ret != (ssize_t)size)
		return ret < 0 ? (int)ret : -1;
	return 0;
}

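/*
 * Pipe-mode variant of do_write(): append to the in-memory feature buffer,
 * doubling its size as needed. The total payload is capped because
 * struct perf_event_header::size is only a u16.
 */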
static int __do_write_buf(struct feat_fd *ff, const void *buf, size_t size)
{
	/* struct perf_event_header::size is u16 */
	const size_t max_size = 0xffff - sizeof(struct perf_event_header);
	size_t new_size = ff->size;
	void *addr;

	if (size + ff->offset > max_size)
		return -E2BIG;

	while (size > (new_size - ff->offset))
		new_size <<= 1;
	new_size = min(max_size, new_size);

	if (ff->size < new_size) {
		addr = realloc(ff->buf, new_size);
		if (!addr)
			return -ENOMEM;
		ff->buf = addr;
		ff->size = new_size;
	}

	memcpy(ff->buf + ff->offset, buf, size);
	ff->offset += size;

	return 0;
}

/* Return: 0 if succeeded, -ERR if failed. */
int do_write(struct feat_fd *ff, const void *buf, size_t size)
{
	if (!ff->buf)
		return __do_write_fd(ff, buf, size);
	return __do_write_buf(ff, buf, size);
}

/* Return: 0 if succeeded, -ERR if failed. */
static int do_write_bitmap(struct feat_fd *ff, unsigned long *set, u64 size)
{
	u64 *p = (u64 *) set;
	int i, ret;

	ret = do_write(ff, &size, sizeof(size));
	if (ret < 0)
		return ret;

	for (i = 0; (u64) i < BITS_TO_U64(size); i++) {
		ret = do_write(ff, p + i, sizeof(*p));
		if (ret < 0)
			return ret;
	}

	return 0;
}

/* Return: 0 if succeeded, -ERR if failed. */
int write_padded(struct feat_fd *ff, const void *bf,
		 size_t count, size_t count_aligned)
{
	static const char zero_buf[NAME_ALIGN];
	int err = do_write(ff, bf, count);

	if (!err)
		err = do_write(ff, zero_buf, count_aligned - count);

	return err;
}

#define string_size(str) \
	(PERF_ALIGN((strlen(str) + 1), NAME_ALIGN) + sizeof(u32))

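/*
 * On-disk string layout, as written by do_write_string() below:
 *   u32  len;        strlen(str) + 1, padded up to NAME_ALIGN
 *   char str[len];   NUL terminated, zero padded
 * (string_size() above gives the total on-disk size for a given string)
 */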
/* Return: 0 if succeeded, -ERR if failed. */
static int do_write_string(struct feat_fd *ff, const char *str)
{
	u32 len, olen;
	int ret;

	olen = strlen(str) + 1;
	len = PERF_ALIGN(olen, NAME_ALIGN);

	/* write len, incl. \0 */
	ret = do_write(ff, &len, sizeof(len));
	if (ret < 0)
		return ret;

	return write_padded(ff, str, olen, len);
}

static int __do_read_fd(struct feat_fd *ff, void *addr, ssize_t size)
{
	ssize_t ret = readn(ff->fd, addr, size);

	if (ret != size)
		return ret < 0 ? (int)ret : -1;
	return 0;
}

static int __do_read_buf(struct feat_fd *ff, void *addr, ssize_t size)
{
	if (size > (ssize_t)ff->size - ff->offset)
		return -1;

	memcpy(addr, ff->buf + ff->offset, size);
	ff->offset += size;

	return 0;

}

static int __do_read(struct feat_fd *ff, void *addr, ssize_t size)
{
	if (!ff->buf)
		return __do_read_fd(ff, addr, size);
	return __do_read_buf(ff, addr, size);
}

static int do_read_u32(struct feat_fd *ff, u32 *addr)
{
	int ret;

	ret = __do_read(ff, addr, sizeof(*addr));
	if (ret)
		return ret;

	if (ff->ph->needs_swap)
		*addr = bswap_32(*addr);
	return 0;
}

static int do_read_u64(struct feat_fd *ff, u64 *addr)
{
	int ret;

	ret = __do_read(ff, addr, sizeof(*addr));
	if (ret)
		return ret;

	if (ff->ph->needs_swap)
		*addr = bswap_64(*addr);
	return 0;
}

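/*
 * Read back a string written by do_write_string(): a u32 length followed
 * by a zero-padded string. Returns a freshly malloc'd buffer that the
 * caller must free, or NULL on error.
 */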
static char *do_read_string(struct feat_fd *ff)
{
	u32 len;
	char *buf;

	if (do_read_u32(ff, &len))
		return NULL;

	buf = malloc(len);
	if (!buf)
		return NULL;

	if (!__do_read(ff, buf, len)) {
		/*
		 * strings are padded by zeroes
		 * thus the actual strlen of buf
		 * may be less than len
		 */
		return buf;
	}

	free(buf);
	return NULL;
}

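/*
 * Read back a bitmap written by do_write_bitmap(): a u64 bit count
 * followed by the bitmap itself, stored as u64 words. On success the
 * caller owns *pset and must free it.
 */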
/* Return: 0 if succeeded, -ERR if failed. */
static int do_read_bitmap(struct feat_fd *ff, unsigned long **pset, u64 *psize)
{
	unsigned long *set;
	u64 size, *p;
	int i, ret;

	ret = do_read_u64(ff, &size);
	if (ret)
		return ret;

	set = bitmap_alloc(size);
	if (!set)
		return -ENOMEM;

	p = (u64 *) set;

	for (i = 0; (u64) i < BITS_TO_U64(size); i++) {
		ret = do_read_u64(ff, p + i);
		if (ret < 0) {
			free(set);
			return ret;
		}
	}

	*pset  = set;
	*psize = size;
	return 0;
}

static int write_tracing_data(struct feat_fd *ff,
			      struct evlist *evlist)
{
	if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
		return -1;

	return read_tracing_data(ff->fd, &evlist->core.entries);
}

static int write_build_id(struct feat_fd *ff,
			  struct evlist *evlist __maybe_unused)
{
	struct perf_session *session;
	int err;

	session = container_of(ff->ph, struct perf_session, header);

	if (!perf_session__read_build_ids(session, true))
		return -1;

	if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
		return -1;

	err = perf_session__write_buildid_table(session, ff);
	if (err < 0) {
		pr_debug("failed to write buildid table\n");
		return err;
	}
	perf_session__cache_build_ids(session);

	return 0;
}

static int write_hostname(struct feat_fd *ff,
			  struct evlist *evlist __maybe_unused)
{
	struct utsname uts;
	int ret;

	ret = uname(&uts);
	if (ret < 0)
		return -1;

	return do_write_string(ff, uts.nodename);
}

static int write_osrelease(struct feat_fd *ff,
			   struct evlist *evlist __maybe_unused)
{
	struct utsname uts;
	int ret;

	ret = uname(&uts);
	if (ret < 0)
		return -1;

	return do_write_string(ff, uts.release);
}

static int write_arch(struct feat_fd *ff,
		      struct evlist *evlist __maybe_unused)
{
	struct utsname uts;
	int ret;

	ret = uname(&uts);
	if (ret < 0)
		return -1;

	return do_write_string(ff, uts.machine);
}

static int write_version(struct feat_fd *ff,
			 struct evlist *evlist __maybe_unused)
{
	return do_write_string(ff, perf_version_string);
}

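/*
 * Find the first /proc/cpuinfo line whose key matches @cpuinfo_proc,
 * strip everything up to the ": " separator, squash repeated whitespace
 * and write the remaining CPU description as a string.
 */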
static int __write_cpudesc(struct feat_fd *ff, const char *cpuinfo_proc)
{
	FILE *file;
	char *buf = NULL;
	char *s, *p;
	const char *search = cpuinfo_proc;
	size_t len = 0;
	int ret = -1;

	if (!search)
		return -1;

	file = fopen("/proc/cpuinfo", "r");
	if (!file)
		return -1;

	while (getline(&buf, &len, file) > 0) {
		ret = strncmp(buf, search, strlen(search));
		if (!ret)
			break;
	}

	if (ret) {
		ret = -1;
		goto done;
	}

	s = buf;

	p = strchr(buf, ':');
	if (p && *(p+1) == ' ' && *(p+2))
		s = p + 2;
	p = strchr(s, '\n');
	if (p)
		*p = '\0';

	/* squash extra space characters (branding string) */
	p = s;
	while (*p) {
		if (isspace(*p)) {
			char *r = p + 1;
			char *q = skip_spaces(r);
			*p = ' ';
			if (q != (p+1))
				while ((*r++ = *q++));
		}
		p++;
	}
	ret = do_write_string(ff, s);
done:
	free(buf);
	fclose(file);
	return ret;
}

static int write_cpudesc(struct feat_fd *ff,
			 struct evlist *evlist __maybe_unused)
{
#if defined(__powerpc__) || defined(__hppa__) || defined(__sparc__)
#define CPUINFO_PROC	{ "cpu", }
#elif defined(__s390__)
#define CPUINFO_PROC	{ "vendor_id", }
#elif defined(__sh__)
#define CPUINFO_PROC	{ "cpu type", }
#elif defined(__alpha__) || defined(__mips__)
#define CPUINFO_PROC	{ "cpu model", }
#elif defined(__arm__)
#define CPUINFO_PROC	{ "model name", "Processor", }
#elif defined(__arc__)
#define CPUINFO_PROC	{ "Processor", }
#elif defined(__xtensa__)
#define CPUINFO_PROC	{ "core ID", }
#else
#define CPUINFO_PROC	{ "model name", }
#endif
	const char *cpuinfo_procs[] = CPUINFO_PROC;
#undef CPUINFO_PROC
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(cpuinfo_procs); i++) {
		int ret;
		ret = __write_cpudesc(ff, cpuinfo_procs[i]);
		if (ret >= 0)
			return ret;
	}
	return -1;
}

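/*
 * Write two u32 values: the number of CPUs present on the system and the
 * number of CPUs currently online (sysconf(_SC_NPROCESSORS_ONLN)).
 */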
static int write_nrcpus(struct feat_fd *ff,
			struct evlist *evlist __maybe_unused)
{
	long nr;
	u32 nrc, nra;
	int ret;

	nrc = cpu__max_present_cpu();

	nr = sysconf(_SC_NPROCESSORS_ONLN);
	if (nr < 0)
		return -1;

	nra = (u32)(nr & UINT_MAX);

	ret = do_write(ff, &nrc, sizeof(nrc));
	if (ret < 0)
		return ret;

	return do_write(ff, &nra, sizeof(nra));
}

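/*
 * File format:
 *
 * struct event_desc {
 *	u32	nr_events;
 *	u32	attr_size;
 *	struct {
 *		struct perf_event_attr	attr;
 *		u32			nr_ids;
 *		char			name[];
 *		u64			ids[nr_ids];
 *	} events[nr_events];
 * };
 */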
static int write_event_desc(struct feat_fd *ff,
			    struct evlist *evlist)
{
	struct evsel *evsel;
	u32 nre, nri, sz;
	int ret;

	nre = evlist->core.nr_entries;

	/*
	 * write number of events
	 */
	ret = do_write(ff, &nre, sizeof(nre));
	if (ret < 0)
		return ret;

	/*
	 * size of perf_event_attr struct
	 */
	sz = (u32)sizeof(evsel->core.attr);
	ret = do_write(ff, &sz, sizeof(sz));
	if (ret < 0)
		return ret;

	evlist__for_each_entry(evlist, evsel) {
		ret = do_write(ff, &evsel->core.attr, sz);
		if (ret < 0)
			return ret;
		/*
		 * write the number of unique ids for this event;
		 * there is one id per instance of the event.
		 *
		 * Copy it into nri so that we are independent of
		 * the actual type of the ids.
		 */
		nri = evsel->core.ids;
		ret = do_write(ff, &nri, sizeof(nri));
		if (ret < 0)
			return ret;

		/*
		 * write event string as passed on cmdline
		 */
		ret = do_write_string(ff, evsel__name(evsel));
		if (ret < 0)
			return ret;
		/*
		 * write unique ids for this event
		 */
		ret = do_write(ff, evsel->core.id, evsel->core.ids * sizeof(u64));
		if (ret < 0)
			return ret;
	}
	return 0;
}

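/*
 * Write the perf command line: a count that includes the resolved path of
 * the perf binary itself, followed by that path and then each recorded
 * command line argument as a string.
 */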
static int write_cmdline(struct feat_fd *ff,
			 struct evlist *evlist __maybe_unused)
{
	char pbuf[MAXPATHLEN], *buf;
	int i, ret, n;

	/* actual path to perf binary */
	buf = perf_exe(pbuf, MAXPATHLEN);

	/* account for binary path */
	n = perf_env.nr_cmdline + 1;

	ret = do_write(ff, &n, sizeof(n));
	if (ret < 0)
		return ret;

	ret = do_write_string(ff, buf);
	if (ret < 0)
		return ret;

	for (i = 0 ; i < perf_env.nr_cmdline; i++) {
		ret = do_write_string(ff, perf_env.cmdline_argv[i]);
		if (ret < 0)
			return ret;
	}
	return 0;
}

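/*
 * File format:
 *
 *   u32 nr core siblings, followed by that many sibling list strings,
 *   u32 nr thread siblings, followed by that many sibling list strings,
 *   a (core_id, socket_id) pair for each available CPU,
 *   and, when die topology is available:
 *   u32 nr die siblings, the die sibling list strings,
 *   and a die_id for each available CPU.
 */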
static int write_cpu_topology(struct feat_fd *ff,
			      struct evlist *evlist __maybe_unused)
{
	struct cpu_topology *tp;
	u32 i;
	int ret, j;

	tp = cpu_topology__new();
	if (!tp)
		return -1;

	ret = do_write(ff, &tp->core_sib, sizeof(tp->core_sib));
	if (ret < 0)
		goto done;

	for (i = 0; i < tp->core_sib; i++) {
		ret = do_write_string(ff, tp->core_siblings[i]);
		if (ret < 0)
			goto done;
	}
	ret = do_write(ff, &tp->thread_sib, sizeof(tp->thread_sib));
	if (ret < 0)
		goto done;

	for (i = 0; i < tp->thread_sib; i++) {
		ret = do_write_string(ff, tp->thread_siblings[i]);
		if (ret < 0)
			break;
	}

	ret = perf_env__read_cpu_topology_map(&perf_env);
	if (ret < 0)
		goto done;

	for (j = 0; j < perf_env.nr_cpus_avail; j++) {
		ret = do_write(ff, &perf_env.cpu[j].core_id,
			       sizeof(perf_env.cpu[j].core_id));
		if (ret < 0)
			return ret;
		ret = do_write(ff, &perf_env.cpu[j].socket_id,
			       sizeof(perf_env.cpu[j].socket_id));
		if (ret < 0)
			return ret;
	}

	if (!tp->die_sib)
		goto done;

	ret = do_write(ff, &tp->die_sib, sizeof(tp->die_sib));
	if (ret < 0)
		goto done;

	for (i = 0; i < tp->die_sib; i++) {
		ret = do_write_string(ff, tp->die_siblings[i]);
		if (ret < 0)
			goto done;
	}

	for (j = 0; j < perf_env.nr_cpus_avail; j++) {
		ret = do_write(ff, &perf_env.cpu[j].die_id,
			       sizeof(perf_env.cpu[j].die_id));
		if (ret < 0)
			return ret;
	}

done:
	cpu_topology__delete(tp);
	return ret;
}

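/*
 * Write the MemTotal value from /proc/meminfo (in kB) as a u64.
 */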
static int write_total_mem(struct feat_fd *ff,
			   struct evlist *evlist __maybe_unused)
{
	char *buf = NULL;
	FILE *fp;
	size_t len = 0;
	int ret = -1, n;
	uint64_t mem;

	fp = fopen("/proc/meminfo", "r");
	if (!fp)
		return -1;

	while (getline(&buf, &len, fp) > 0) {
		ret = strncmp(buf, "MemTotal:", 9);
		if (!ret)
			break;
	}
	if (!ret) {
		n = sscanf(buf, "%*s %"PRIu64, &mem);
		if (n == 1)
			ret = do_write(ff, &mem, sizeof(mem));
	} else
		ret = -1;
	free(buf);
	fclose(fp);
	return ret;
}

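/*
 * File format:
 *
 *   u32 nr_nodes;
 *   then for each node: u32 node id, u64 mem_total, u64 mem_free and the
 *   node's CPU list as a string.
 */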
static int write_numa_topology(struct feat_fd *ff,
			       struct evlist *evlist __maybe_unused)
{
	struct numa_topology *tp;
	int ret = -1;
	u32 i;

	tp = numa_topology__new();
	if (!tp)
		return -ENOMEM;

	ret = do_write(ff, &tp->nr, sizeof(u32));
	if (ret < 0)
		goto err;

	for (i = 0; i < tp->nr; i++) {
		struct numa_topology_node *n = &tp->nodes[i];

		ret = do_write(ff, &n->node, sizeof(u32));
		if (ret < 0)
			goto err;

		ret = do_write(ff, &n->mem_total, sizeof(u64));
		if (ret)
			goto err;

		ret = do_write(ff, &n->mem_free, sizeof(u64));
		if (ret)
			goto err;

		ret = do_write_string(ff, n->cpus);
		if (ret < 0)
			goto err;
	}

	ret = 0;

err:
	numa_topology__delete(tp);
	return ret;
}

/*
 * File format:
 *
 * struct pmu_mappings {
 *	u32	pmu_num;
 *	struct pmu_map {
 *		u32	type;
 *		char	name[];
 *	}[pmu_num];
 * };
 */

static int write_pmu_mappings(struct feat_fd *ff,
			      struct evlist *evlist __maybe_unused)
{
	struct perf_pmu *pmu = NULL;
	u32 pmu_num = 0;
	int ret;

	/*
	 * Do a first pass to count the number of PMUs, to avoid an lseek()
	 * so that this works in pipe mode as well.
	 */
	while ((pmu = perf_pmu__scan(pmu))) {
		if (!pmu->name)
			continue;
		pmu_num++;
	}

	ret = do_write(ff, &pmu_num, sizeof(pmu_num));
	if (ret < 0)
		return ret;

	while ((pmu = perf_pmu__scan(pmu))) {
		if (!pmu->name)
			continue;

		ret = do_write(ff, &pmu->type, sizeof(pmu->type));
		if (ret < 0)
			return ret;

		ret = do_write_string(ff, pmu->name);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/*
 * File format:
 *
 * struct group_descs {
 *	u32	nr_groups;
 *	struct group_desc {
 *		char	name[];
 *		u32	leader_idx;
 *		u32	nr_members;
 *	}[nr_groups];
 * };
 */
static int write_group_desc(struct feat_fd *ff,
			    struct evlist *evlist)
{
	u32 nr_groups = evlist->nr_groups;
	struct evsel *evsel;
	int ret;

	ret = do_write(ff, &nr_groups, sizeof(nr_groups));
	if (ret < 0)
		return ret;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel__is_group_leader(evsel) && evsel->core.nr_members > 1) {
			const char *name = evsel->group_name ?: "{anon_group}";
			u32 leader_idx = evsel->idx;
			u32 nr_members = evsel->core.nr_members;

			ret = do_write_string(ff, name);
			if (ret < 0)
				return ret;

			ret = do_write(ff, &leader_idx, sizeof(leader_idx));
			if (ret < 0)
				return ret;

			ret = do_write(ff, &nr_members, sizeof(nr_members));
			if (ret < 0)
				return ret;
		}
	}
	return 0;
}

/*
 * Return the CPU id as a raw string.
 *
 * Each architecture should provide a more precise id string that
 * can be used to match the architecture's "mapfile".
 */
char * __weak get_cpuid_str(struct perf_pmu *pmu __maybe_unused)
{
	return NULL;
}

/* Return zero when the cpuid from the mapfile.csv matches the
 * cpuid string generated on this platform.
 * Otherwise return non-zero.
 */
int __weak strcmp_cpuid_str(const char *mapcpuid, const char *cpuid)
{
	regex_t re;
	regmatch_t pmatch[1];
	int match;

	if (regcomp(&re, mapcpuid, REG_EXTENDED) != 0) {
		/* Warn that we could not compile the regex for this cpuid string. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) pr_info("Invalid regular expression %s\n", mapcpuid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) match = !regexec(&re, cpuid, 1, pmatch, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) regfree(&re);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) if (match) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) size_t match_len = (pmatch[0].rm_eo - pmatch[0].rm_so);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) /* Verify the entire string matched. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) if (match_len == strlen(cpuid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) * default get_cpuid(): nothing gets recorded
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) * actual implementation must be in arch/$(SRCARCH)/util/header.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) return ENOSYS; /* Not implemented */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) static int write_cpuid(struct feat_fd *ff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) struct evlist *evlist __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) char buffer[64];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) ret = get_cpuid(buffer, sizeof(buffer));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) return do_write_string(ff, buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) static int write_branch_stack(struct feat_fd *ff __maybe_unused,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) struct evlist *evlist __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) static int write_auxtrace(struct feat_fd *ff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) struct evlist *evlist __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) struct perf_session *session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) session = container_of(ff->ph, struct perf_session, header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) err = auxtrace_index__write(ff->fd, &session->auxtrace_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) pr_err("Failed to write auxtrace index\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) static int write_clockid(struct feat_fd *ff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) struct evlist *evlist __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) return do_write(ff, &ff->ph->env.clock.clockid_res_ns,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) sizeof(ff->ph->env.clock.clockid_res_ns));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
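/*
 * CLOCK_DATA layout:
 *
 *   u32 version     - currently 1
 *   u32 clockid     - clockid used for the session
 *   u64 tod_ns      - TOD (wall clock) reference time in nanoseconds
 *   u64 clockid_ns  - clockid reference time in nanoseconds
 */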
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) static int write_clock_data(struct feat_fd *ff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) struct evlist *evlist __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) u64 *data64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) u32 data32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) /* version */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) data32 = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) ret = do_write(ff, &data32, sizeof(data32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) /* clockid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) data32 = ff->ph->env.clock.clockid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) ret = do_write(ff, &data32, sizeof(data32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) /* TOD ref time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) data64 = &ff->ph->env.clock.tod_ns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) ret = do_write(ff, data64, sizeof(*data64));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) /* clockid ref time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) data64 = &ff->ph->env.clock.clockid_ns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) return do_write(ff, data64, sizeof(*data64));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
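/*
 * Record the directory data format version. Only meaningful when the
 * data is written in directory mode, hence the WARN_ON below.
 */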
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) static int write_dir_format(struct feat_fd *ff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) struct evlist *evlist __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) struct perf_session *session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) struct perf_data *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) session = container_of(ff->ph, struct perf_session, header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) data = session->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) if (WARN_ON(!perf_data__is_dir(data)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) return do_write(ff, &data->dir.version, sizeof(data->dir.version));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) #ifdef HAVE_LIBBPF_SUPPORT
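/*
 * Write the number of BPF program infos followed by each
 * bpf_prog_info_linear record (header plus data_len bytes). Embedded
 * pointers are converted to offsets for the on-disk copy and restored
 * afterwards.
 */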
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) static int write_bpf_prog_info(struct feat_fd *ff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) struct evlist *evlist __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) struct perf_env *env = &ff->ph->env;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) struct rb_root *root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) struct rb_node *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) down_read(&env->bpf_progs.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) ret = do_write(ff, &env->bpf_progs.infos_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) sizeof(env->bpf_progs.infos_cnt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) root = &env->bpf_progs.infos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) next = rb_first(root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) while (next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) struct bpf_prog_info_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) size_t len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) node = rb_entry(next, struct bpf_prog_info_node, rb_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) next = rb_next(&node->rb_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) len = sizeof(struct bpf_prog_info_linear) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) node->info_linear->data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) /* before writing to file, translate address to offset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) bpf_program__bpil_addr_to_offs(node->info_linear);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) ret = do_write(ff, node->info_linear, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) * translate back to address even when do_write() fails,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) * so that this function never changes the data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) bpf_program__bpil_offs_to_addr(node->info_linear);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) up_read(&env->bpf_progs.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) #else // HAVE_LIBBPF_SUPPORT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) static int write_bpf_prog_info(struct feat_fd *ff __maybe_unused,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) struct evlist *evlist __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) #endif // HAVE_LIBBPF_SUPPORT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
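/*
 * Write the number of BTF objects followed, for each node, by its id,
 * data_size and raw BTF data. The sizeof(u32) * 2 below relies on id
 * and data_size being consecutive u32s immediately preceding the data
 * in struct btf_node.
 */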
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) static int write_bpf_btf(struct feat_fd *ff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) struct evlist *evlist __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) struct perf_env *env = &ff->ph->env;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) struct rb_root *root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) struct rb_node *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) down_read(&env->bpf_progs.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) ret = do_write(ff, &env->bpf_progs.btfs_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) sizeof(env->bpf_progs.btfs_cnt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) root = &env->bpf_progs.btfs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) next = rb_first(root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) while (next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) struct btf_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) node = rb_entry(next, struct btf_node, rb_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) next = rb_next(&node->rb_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) ret = do_write(ff, &node->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) sizeof(u32) * 2 + node->data_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) up_read(&env->bpf_progs.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) static int cpu_cache_level__sort(const void *a, const void *b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) struct cpu_cache_level *cache_b = (struct cpu_cache_level *)b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) return cache_a->level - cache_b->level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) static bool cpu_cache_level__cmp(struct cpu_cache_level *a, struct cpu_cache_level *b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) if (a->level != b->level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) if (a->line_size != b->line_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) if (a->sets != b->sets)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) if (a->ways != b->ways)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) if (strcmp(a->type, b->type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) if (strcmp(a->size, b->size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) if (strcmp(a->map, b->map))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
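/*
 * Read one cache "index%d" directory from sysfs for the given CPU.
 * Returns 0 on success, 1 if the cache index directory is not present,
 * and -1 on error. On success the caller owns the type/size/map strings.
 */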
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) char path[PATH_MAX], file[PATH_MAX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) struct stat st;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) size_t len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) scnprintf(path, PATH_MAX, "devices/system/cpu/cpu%d/cache/index%d/", cpu, level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) scnprintf(file, PATH_MAX, "%s/%s", sysfs__mountpoint(), path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) if (stat(file, &st))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) scnprintf(file, PATH_MAX, "%s/level", path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) if (sysfs__read_int(file, (int *) &cache->level))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) scnprintf(file, PATH_MAX, "%s/coherency_line_size", path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) if (sysfs__read_int(file, (int *) &cache->line_size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) scnprintf(file, PATH_MAX, "%s/number_of_sets", path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) if (sysfs__read_int(file, (int *) &cache->sets))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) scnprintf(file, PATH_MAX, "%s/ways_of_associativity", path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) if (sysfs__read_int(file, (int *) &cache->ways))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) scnprintf(file, PATH_MAX, "%s/type", path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) if (sysfs__read_str(file, &cache->type, &len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) cache->type[len] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) cache->type = strim(cache->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) scnprintf(file, PATH_MAX, "%s/size", path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) if (sysfs__read_str(file, &cache->size, &len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) zfree(&cache->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) cache->size[len] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) cache->size = strim(cache->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) if (sysfs__read_str(file, &cache->map, &len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) zfree(&cache->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) zfree(&cache->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) cache->map[len] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) cache->map = strim(cache->map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) static void cpu_cache_level__fprintf(FILE *out, struct cpu_cache_level *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) fprintf(out, "L%d %-15s %8s [%s]\n", c->level, c->type, c->size, c->map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) #define MAX_CACHE_LVL 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
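/*
 * Walk every possible CPU and cache index, keeping only unique cache
 * descriptions; duplicates (e.g. caches shared between CPUs) are freed.
 */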
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) static int build_caches(struct cpu_cache_level caches[], u32 *cntp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) u32 i, cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) u32 nr, cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) u16 level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) nr = cpu__max_cpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) for (cpu = 0; cpu < nr; cpu++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) for (level = 0; level < MAX_CACHE_LVL; level++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) struct cpu_cache_level c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) err = cpu_cache_level__read(&c, cpu, level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) if (err == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) for (i = 0; i < cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) if (cpu_cache_level__cmp(&c, &caches[i]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) if (i == cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) caches[cnt++] = c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) cpu_cache_level__free(&c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) *cntp = cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)
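/*
 * CACHE layout: a u32 version (1) and a u32 entry count, followed by
 * each entry's level, line_size, sets and ways as u32s and its type,
 * size and map strings. For example (illustrative values only), an L1
 * data cache could be recorded as 1, 64, 64, 8, "Data", "32K", "0-1".
 */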
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) static int write_cache(struct feat_fd *ff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) struct evlist *evlist __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) u32 max_caches = cpu__max_cpu() * MAX_CACHE_LVL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) struct cpu_cache_level caches[max_caches];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) u32 cnt = 0, i, version = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) ret = build_caches(caches, &cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) qsort(&caches, cnt, sizeof(struct cpu_cache_level), cpu_cache_level__sort);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) ret = do_write(ff, &version, sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) ret = do_write(ff, &cnt, sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) for (i = 0; i < cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) struct cpu_cache_level *c = &caches[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) #define _W(v) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) ret = do_write(ff, &c->v, sizeof(u32)); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) if (ret < 0) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) _W(level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) _W(line_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) _W(sets)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) _W(ways)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) #undef _W
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) #define _W(v) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) ret = do_write_string(ff, (const char *) c->v); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) if (ret < 0) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) _W(type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) _W(size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) _W(map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) #undef _W
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) for (i = 0; i < cnt; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) cpu_cache_level__free(&caches[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) static int write_stat(struct feat_fd *ff __maybe_unused,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) struct evlist *evlist __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) static int write_sample_time(struct feat_fd *ff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) struct evlist *evlist)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) ret = do_write(ff, &evlist->first_sample_time,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) sizeof(evlist->first_sample_time));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) return do_write(ff, &evlist->last_sample_time,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) sizeof(evlist->last_sample_time));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
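/*
 * Read the memory%u block entries under one node's sysfs directory.
 * The first pass finds the highest block index to size the bitmap, the
 * second pass sets a bit for every present block.
 */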
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) static int memory_node__read(struct memory_node *n, unsigned long idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) unsigned int phys, size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) char path[PATH_MAX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) struct dirent *ent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) DIR *dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) #define for_each_memory(mem, dir) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) while ((ent = readdir(dir))) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) if (strcmp(ent->d_name, ".") && \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) strcmp(ent->d_name, "..") && \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) sscanf(ent->d_name, "memory%u", &mem) == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) scnprintf(path, PATH_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) "%s/devices/system/node/node%lu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) sysfs__mountpoint(), idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) dir = opendir(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) if (!dir) {
pr_warning("failed: can't open memory sysfs data\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) for_each_memory(phys, dir) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) size = max(phys, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) size++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) n->set = bitmap_alloc(size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) if (!n->set) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) closedir(dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) n->node = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) n->size = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) rewinddir(dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) for_each_memory(phys, dir) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) set_bit(phys, n->set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) closedir(dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) static int memory_node__sort(const void *a, const void *b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) const struct memory_node *na = a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) const struct memory_node *nb = b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) return na->node - nb->node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)
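/*
 * Enumerate the /sys/devices/system/node/node%u directories, read the
 * memory map of each node and sort the result by node id.
 */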
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) static int build_mem_topology(struct memory_node *nodes, u64 size, u64 *cntp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) char path[PATH_MAX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) struct dirent *ent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) DIR *dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) u64 cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) scnprintf(path, PATH_MAX, "%s/devices/system/node/",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) sysfs__mountpoint());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) dir = opendir(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) if (!dir) {
pr_debug2("%s: couldn't read %s, does this arch have topology information?\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) __func__, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) while (!ret && (ent = readdir(dir))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) unsigned int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) if (!strcmp(ent->d_name, ".") ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) !strcmp(ent->d_name, ".."))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) r = sscanf(ent->d_name, "node%u", &idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) if (r != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) if (WARN_ONCE(cnt >= size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) "failed to write MEM_TOPOLOGY, way too many nodes\n")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) closedir(dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) ret = memory_node__read(&nodes[cnt++], idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) *cntp = cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) closedir(dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) qsort(nodes, cnt, sizeof(nodes[0]), memory_node__sort);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) #define MAX_MEMORY_NODES 2000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342)
/*
 * The MEM_TOPOLOGY feature holds the physical memory map for every
 * node in the system. The format of the data is as follows:
 *
 *  0 - version          | for future changes
 *  8 - block_size_bytes | /sys/devices/system/memory/block_size_bytes
 * 16 - count            | number of nodes
 *
 * For each node we store a map of physical memory block indexes:
 *
 * 32 - node id          | node index
 * 40 - size             | size of bitmap
 * 48 - bitmap           | bitmap of memory block indexes that belong to the node
 */
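/*
 * For example (illustrative values only), a machine with two nodes and
 * 128MB memory blocks could be recorded as:
 *
 *   version = 1, block_size_bytes = 0x8000000, count = 2
 *   node 0: size 16, bitmap with bits  0..15 set
 *   node 1: size 32, bitmap with bits 16..31 set
 */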
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) static int write_mem_topology(struct feat_fd *ff __maybe_unused,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) struct evlist *evlist __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) static struct memory_node nodes[MAX_MEMORY_NODES];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) u64 bsize, version = 1, i, nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) ret = sysfs__read_xll("devices/system/memory/block_size_bytes",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) (unsigned long long *) &bsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) ret = build_mem_topology(&nodes[0], MAX_MEMORY_NODES, &nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) ret = do_write(ff, &version, sizeof(version));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) ret = do_write(ff, &bsize, sizeof(bsize));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) ret = do_write(ff, &nr, sizeof(nr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) for (i = 0; i < nr; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) struct memory_node *n = &nodes[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) #define _W(v) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) ret = do_write(ff, &n->v, sizeof(n->v)); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) if (ret < 0) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) _W(node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) _W(size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) #undef _W
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) ret = do_write_bitmap(ff, n->set, n->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407)
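/*
 * Record the compression parameters used for the session: version,
 * type, level, ratio and mmap length, all taken from the env.
 */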
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) static int write_compressed(struct feat_fd *ff __maybe_unused,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) struct evlist *evlist __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) ret = do_write(ff, &(ff->ph->env.comp_ver), sizeof(ff->ph->env.comp_ver));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) ret = do_write(ff, &(ff->ph->env.comp_type), sizeof(ff->ph->env.comp_type));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) ret = do_write(ff, &(ff->ph->env.comp_level), sizeof(ff->ph->env.comp_level));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) ret = do_write(ff, &(ff->ph->env.comp_ratio), sizeof(ff->ph->env.comp_ratio));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) return do_write(ff, &(ff->ph->env.comp_mmap_len), sizeof(ff->ph->env.comp_mmap_len));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431)
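/*
 * Write the capabilities of the "cpu" PMU: the number of caps followed
 * by a name/value string pair for each, as parsed from sysfs by
 * perf_pmu__caps_parse().
 */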
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) static int write_cpu_pmu_caps(struct feat_fd *ff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) struct evlist *evlist __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) struct perf_pmu *cpu_pmu = perf_pmu__find("cpu");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) struct perf_pmu_caps *caps = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) int nr_caps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) if (!cpu_pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) nr_caps = perf_pmu__caps_parse(cpu_pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) if (nr_caps < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) return nr_caps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) ret = do_write(ff, &nr_caps, sizeof(nr_caps));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) list_for_each_entry(caps, &cpu_pmu->caps, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) ret = do_write_string(ff, caps->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) ret = do_write_string(ff, caps->value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) static void print_hostname(struct feat_fd *ff, FILE *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) fprintf(fp, "# hostname : %s\n", ff->ph->env.hostname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) static void print_osrelease(struct feat_fd *ff, FILE *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) fprintf(fp, "# os release : %s\n", ff->ph->env.os_release);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) static void print_arch(struct feat_fd *ff, FILE *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) fprintf(fp, "# arch : %s\n", ff->ph->env.arch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) static void print_cpudesc(struct feat_fd *ff, FILE *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) fprintf(fp, "# cpudesc : %s\n", ff->ph->env.cpu_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) static void print_nrcpus(struct feat_fd *ff, FILE *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) fprintf(fp, "# nrcpus online : %u\n", ff->ph->env.nr_cpus_online);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) fprintf(fp, "# nrcpus avail : %u\n", ff->ph->env.nr_cpus_avail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) static void print_version(struct feat_fd *ff, FILE *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) fprintf(fp, "# perf version : %s\n", ff->ph->env.version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494)
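/*
 * Print the recorded command line, re-emitting each argument with
 * single quotes escaped as \'.
 */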
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) static void print_cmdline(struct feat_fd *ff, FILE *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) int nr, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) nr = ff->ph->env.nr_cmdline;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) fprintf(fp, "# cmdline : ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) for (i = 0; i < nr; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) char *argv_i = strdup(ff->ph->env.cmdline_argv[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) if (!argv_i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) fprintf(fp, "%s ", ff->ph->env.cmdline_argv[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) char *mem = argv_i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) char *quote = strchr(argv_i, '\'');
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) if (!quote)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) *quote++ = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) fprintf(fp, "%s\\\'", argv_i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) argv_i = quote;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) } while (1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) fprintf(fp, "%s ", argv_i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) free(mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) fputc('\n', fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) static void print_cpu_topology(struct feat_fd *ff, FILE *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) struct perf_header *ph = ff->ph;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) int cpu_nr = ph->env.nr_cpus_avail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) int nr, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) char *str;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) nr = ph->env.nr_sibling_cores;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) str = ph->env.sibling_cores;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) for (i = 0; i < nr; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) fprintf(fp, "# sibling sockets : %s\n", str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) str += strlen(str) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) if (ph->env.nr_sibling_dies) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) nr = ph->env.nr_sibling_dies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) str = ph->env.sibling_dies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) for (i = 0; i < nr; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) fprintf(fp, "# sibling dies : %s\n", str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) str += strlen(str) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) nr = ph->env.nr_sibling_threads;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) str = ph->env.sibling_threads;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) for (i = 0; i < nr; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) fprintf(fp, "# sibling threads : %s\n", str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) str += strlen(str) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) if (ph->env.nr_sibling_dies) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) if (ph->env.cpu != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) for (i = 0; i < cpu_nr; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) fprintf(fp, "# CPU %d: Core ID %d, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) "Die ID %d, Socket ID %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) i, ph->env.cpu[i].core_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) ph->env.cpu[i].die_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) ph->env.cpu[i].socket_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) fprintf(fp, "# Core ID, Die ID and Socket ID "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) "information is not available\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) if (ph->env.cpu != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) for (i = 0; i < cpu_nr; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) fprintf(fp, "# CPU %d: Core ID %d, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) "Socket ID %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) i, ph->env.cpu[i].core_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) ph->env.cpu[i].socket_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) fprintf(fp, "# Core ID and Socket ID "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) "information is not available\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) static void print_clockid(struct feat_fd *ff, FILE *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) fprintf(fp, "# clockid frequency: %"PRIu64" MHz\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) ff->ph->env.clock.clockid_res_ns * 1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) static void print_clock_data(struct feat_fd *ff, FILE *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) struct timespec clockid_ns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) char tstr[64], date[64];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) struct timeval tod_ns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) clockid_t clockid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) struct tm ltime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) u64 ref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) if (!ff->ph->env.clock.enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) fprintf(fp, "# reference time disabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) /* Compute TOD time. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) ref = ff->ph->env.clock.tod_ns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) tod_ns.tv_sec = ref / NSEC_PER_SEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) ref -= tod_ns.tv_sec * NSEC_PER_SEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) tod_ns.tv_usec = ref / NSEC_PER_USEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) /* Compute clockid time. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) ref = ff->ph->env.clock.clockid_ns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) clockid_ns.tv_sec = ref / NSEC_PER_SEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) ref -= clockid_ns.tv_sec * NSEC_PER_SEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) clockid_ns.tv_nsec = ref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) clockid = ff->ph->env.clock.clockid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614)
if (localtime_r(&tod_ns.tv_sec, &ltime) == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) snprintf(tstr, sizeof(tstr), "<error>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) else {
strftime(date, sizeof(date), "%F %T", &ltime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) scnprintf(tstr, sizeof(tstr), "%s.%06d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) date, (int) tod_ns.tv_usec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) fprintf(fp, "# clockid: %s (%u)\n", clockid_name(clockid), clockid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) fprintf(fp, "# reference time: %s = %ld.%06d (TOD) = %ld.%09ld (%s)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) tstr, tod_ns.tv_sec, (int) tod_ns.tv_usec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) clockid_ns.tv_sec, clockid_ns.tv_nsec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) clockid_name(clockid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) static void print_dir_format(struct feat_fd *ff, FILE *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) struct perf_session *session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) struct perf_data *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) session = container_of(ff->ph, struct perf_session, header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) data = session->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) fprintf(fp, "# directory data version : %"PRIu64"\n", data->dir.version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) static void print_bpf_prog_info(struct feat_fd *ff, FILE *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) struct perf_env *env = &ff->ph->env;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) struct rb_root *root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) struct rb_node *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) down_read(&env->bpf_progs.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) root = &env->bpf_progs.infos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) next = rb_first(root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) while (next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) struct bpf_prog_info_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) node = rb_entry(next, struct bpf_prog_info_node, rb_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) next = rb_next(&node->rb_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) bpf_event__print_bpf_prog_info(&node->info_linear->info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) env, fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) up_read(&env->bpf_progs.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) static void print_bpf_btf(struct feat_fd *ff, FILE *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) struct perf_env *env = &ff->ph->env;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) struct rb_root *root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) struct rb_node *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) down_read(&env->bpf_progs.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) root = &env->bpf_progs.btfs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) next = rb_first(root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) while (next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) struct btf_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) node = rb_entry(next, struct btf_node, rb_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) next = rb_next(&node->rb_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) fprintf(fp, "# btf info of id %u\n", node->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) up_read(&env->bpf_progs.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) static void free_event_desc(struct evsel *events)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) struct evsel *evsel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) if (!events)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) for (evsel = events; evsel->core.attr.size; evsel++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) zfree(&evsel->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) zfree(&evsel->core.id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) free(events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701)
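/*
 * Sanity-check a perf_event_attr read from the file: reject attrs with
 * reserved bits set, or with sample type, read format or branch sample
 * type bits this perf does not know about.
 */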
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) static bool perf_attr_check(struct perf_event_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) if (attr->__reserved_1 || attr->__reserved_2 || attr->__reserved_3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) pr_warning("Reserved bits are set unexpectedly. "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) "Please update perf tool.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) if (attr->sample_type & ~(PERF_SAMPLE_MAX-1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) pr_warning("Unknown sample type (0x%llx) is detected. "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) "Please update perf tool.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) attr->sample_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) if (attr->read_format & ~(PERF_FORMAT_MAX-1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) pr_warning("Unknown read format (0x%llx) is detected. "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) "Please update perf tool.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) attr->read_format);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) if ((attr->sample_type & PERF_SAMPLE_BRANCH_STACK) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) (attr->branch_sample_type & ~(PERF_SAMPLE_BRANCH_MAX-1))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) pr_warning("Unknown branch sample type (0x%llx) is detected. "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) "Please update perf tool.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) attr->branch_sample_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) static struct evsel *read_event_desc(struct feat_fd *ff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) struct evsel *evsel, *events = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) u64 *id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) void *buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) u32 nre, sz, nr, i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) size_t msz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) /* number of events */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) if (do_read_u32(ff, &nre))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) if (do_read_u32(ff, &sz))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750)
	/* buffer to hold the on-file attr struct */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) buf = malloc(sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) if (!buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) /* the last event terminates with evsel->core.attr.size == 0: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) events = calloc(nre + 1, sizeof(*events));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) if (!events)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760)
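	/*
	 * Copy no more than the smaller of the on-file attr size and our
	 * in-memory struct, so files written by older or newer perf still
	 * have their attrs picked up.
	 */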
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) msz = sizeof(evsel->core.attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) if (sz < msz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) msz = sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) for (i = 0, evsel = events; i < nre; evsel++, i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) evsel->idx = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767)
		/*
		 * Must read the entire on-file attr struct to
		 * stay in sync with its on-disk layout.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) if (__do_read(ff, buf, sz))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) if (ff->ph->needs_swap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) perf_event__attr_swap(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) memcpy(&evsel->core.attr, buf, msz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) if (!perf_attr_check(&evsel->core.attr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782)
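		/* number of IDs; the event name follows, then the ID values */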
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) if (do_read_u32(ff, &nr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) if (ff->ph->needs_swap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) evsel->needs_swap = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) evsel->name = do_read_string(ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) if (!evsel->name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) if (!nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) id = calloc(nr, sizeof(*id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) if (!id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) evsel->core.ids = nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) evsel->core.id = id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) for (j = 0 ; j < nr; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) if (do_read_u64(ff, id))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) id++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) free(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) return events;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) free_event_desc(events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) events = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) static int __desc_attr__fprintf(FILE *fp, const char *name, const char *val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) void *priv __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) return fprintf(fp, ", %s = %s", name, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) static void print_event_desc(struct feat_fd *ff, FILE *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) struct evsel *evsel, *events;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) u32 j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) u64 *id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) if (ff->events)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) events = ff->events;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) events = read_event_desc(ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) if (!events) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) fprintf(fp, "# event desc: not available or unable to read\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) for (evsel = events; evsel->core.attr.size; evsel++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) fprintf(fp, "# event : name = %s, ", evsel->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) if (evsel->core.ids) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) fprintf(fp, ", id = {");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) for (j = 0, id = evsel->core.id; j < evsel->core.ids; j++, id++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) if (j)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) fputc(',', fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) fprintf(fp, " %"PRIu64, *id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) fprintf(fp, " }");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) perf_event_attr__fprintf(fp, &evsel->core.attr, __desc_attr__fprintf, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) fputc('\n', fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) free_event_desc(events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) ff->events = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) static void print_total_mem(struct feat_fd *ff, FILE *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) fprintf(fp, "# total memory : %llu kB\n", ff->ph->env.total_mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) static void print_numa_topology(struct feat_fd *ff, FILE *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) struct numa_node *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) for (i = 0; i < ff->ph->env.nr_numa_nodes; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) n = &ff->ph->env.numa_nodes[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) fprintf(fp, "# node%u meminfo : total = %"PRIu64" kB,"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) " free = %"PRIu64" kB\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) n->node, n->mem_total, n->mem_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) fprintf(fp, "# node%u cpu list : ", n->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) cpu_map__fprintf(n->map, fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) static void print_cpuid(struct feat_fd *ff, FILE *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) fprintf(fp, "# cpuid : %s\n", ff->ph->env.cpuid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) static void print_branch_stack(struct feat_fd *ff __maybe_unused, FILE *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) fprintf(fp, "# contains samples with branch stack\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) static void print_auxtrace(struct feat_fd *ff __maybe_unused, FILE *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) fprintf(fp, "# contains AUX area data (e.g. instruction trace)\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) static void print_stat(struct feat_fd *ff __maybe_unused, FILE *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) fprintf(fp, "# contains stat data\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902)
static void print_cache(struct feat_fd *ff, FILE *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) fprintf(fp, "# CPU cache info:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) for (i = 0; i < ff->ph->env.caches_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) fprintf(fp, "# ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) cpu_cache_level__fprintf(fp, &ff->ph->env.caches[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) static void print_compressed(struct feat_fd *ff, FILE *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) fprintf(fp, "# compressed : %s, level = %d, ratio = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) ff->ph->env.comp_type == PERF_COMP_ZSTD ? "Zstd" : "Unknown",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) ff->ph->env.comp_level, ff->ph->env.comp_ratio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) static void print_cpu_pmu_caps(struct feat_fd *ff, FILE *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) const char *delimiter = "# cpu pmu capabilities: ";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) u32 nr_caps = ff->ph->env.nr_cpu_pmu_caps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) char *str;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) if (!nr_caps) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) fprintf(fp, "# cpu pmu capabilities: not available\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931)
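	/* cpu_pmu_caps is a list of nr_caps NUL-separated capability strings */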
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) str = ff->ph->env.cpu_pmu_caps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) while (nr_caps--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) fprintf(fp, "%s%s", delimiter, str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) delimiter = ", ";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) str += strlen(str) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) fprintf(fp, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) static void print_pmu_mappings(struct feat_fd *ff, FILE *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) const char *delimiter = "# pmu mappings: ";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) char *str, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) u32 pmu_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) u32 type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) pmu_num = ff->ph->env.nr_pmu_mappings;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) if (!pmu_num) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) fprintf(fp, "# pmu mappings: not available\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) str = ff->ph->env.pmu_mappings;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956)
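	/* pmu_mappings is a list of NUL-separated "<type>:<name>" entries */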
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) while (pmu_num) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) type = strtoul(str, &tmp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) if (*tmp != ':')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) str = tmp + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) delimiter = ", ";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) str += strlen(str) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) pmu_num--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) fprintf(fp, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) if (!pmu_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) fprintf(fp, "# pmu mappings: unable to read\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) static void print_group_desc(struct feat_fd *ff, FILE *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) struct perf_session *session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) struct evsel *evsel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) u32 nr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) session = container_of(ff->ph, struct perf_session, header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985)
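	/* nr counts remaining group members; the closing brace is printed when it hits zero */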
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) evlist__for_each_entry(session->evlist, evsel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) if (evsel__is_group_leader(evsel) && evsel->core.nr_members > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) fprintf(fp, "# group: %s{%s", evsel->group_name ?: "", evsel__name(evsel));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) nr = evsel->core.nr_members - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) } else if (nr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) fprintf(fp, ",%s", evsel__name(evsel));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) if (--nr == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) fprintf(fp, "}\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) static void print_sample_time(struct feat_fd *ff, FILE *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) struct perf_session *session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) char time_buf[32];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) double d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) session = container_of(ff->ph, struct perf_session, header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) timestamp__scnprintf_usec(session->evlist->first_sample_time,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) time_buf, sizeof(time_buf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) fprintf(fp, "# time of first sample : %s\n", time_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) timestamp__scnprintf_usec(session->evlist->last_sample_time,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) time_buf, sizeof(time_buf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) fprintf(fp, "# time of last sample : %s\n", time_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) d = (double)(session->evlist->last_sample_time -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) session->evlist->first_sample_time) / NSEC_PER_MSEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) fprintf(fp, "# sample duration : %10.3f ms\n", d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) static void memory_node__fprintf(struct memory_node *n,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) unsigned long long bsize, FILE *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) char buf_map[100], buf_size[50];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) unsigned long long size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027)
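	/* total node memory = block size times the number of present blocks in the bitmap */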
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) size = bsize * bitmap_weight(n->set, n->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) unit_number__scnprintf(buf_size, 50, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) bitmap_scnprintf(n->set, n->size, buf_map, 100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) fprintf(fp, "# %3" PRIu64 " [%s]: %s\n", n->node, buf_size, buf_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) static void print_mem_topology(struct feat_fd *ff, FILE *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) struct memory_node *nodes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) int i, nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) nodes = ff->ph->env.memory_nodes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) nr = ff->ph->env.nr_memory_nodes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) fprintf(fp, "# memory nodes (nr %d, block size 0x%llx):\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) nr, ff->ph->env.memory_bsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) for (i = 0; i < nr; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) memory_node__fprintf(&nodes[i], ff->ph->env.memory_bsize, fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) static int __event_process_build_id(struct perf_record_header_build_id *bev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) char *filename,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) struct perf_session *session)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) int err = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) struct machine *machine;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) u16 cpumode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) struct dso *dso;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) enum dso_space_type dso_space;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) machine = perf_session__findnew_machine(session, bev->pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) if (!machine)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) cpumode = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) switch (cpumode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) case PERF_RECORD_MISC_KERNEL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) dso_space = DSO_SPACE__KERNEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) case PERF_RECORD_MISC_GUEST_KERNEL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) dso_space = DSO_SPACE__KERNEL_GUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) case PERF_RECORD_MISC_USER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) case PERF_RECORD_MISC_GUEST_USER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) dso_space = DSO_SPACE__USER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) dso = machine__findnew_dso(machine, filename);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) if (dso != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) char sbuild_id[SBUILD_ID_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) struct build_id bid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) size_t size = BUILD_ID_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087)
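		/*
		 * Newer files record the real build-id length in bev->size;
		 * older files imply the fixed BUILD_ID_SIZE.
		 */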
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) if (bev->header.misc & PERF_RECORD_MISC_BUILD_ID_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) size = bev->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) build_id__init(&bid, bev->data, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) dso__set_build_id(dso, &bid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) if (dso_space != DSO_SPACE__USER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) struct kmod_path m = { .name = NULL, };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) if (!kmod_path__parse_name(&m, filename) && m.kmod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) dso__set_module_info(dso, &m, machine);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) dso->kernel = dso_space;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) free(m.name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) build_id__sprintf(&dso->bid, sbuild_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) pr_debug("build id event received for %s: %s [%zu]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) dso->long_name, sbuild_id, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) dso__put(dso);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) int input, u64 offset, u64 size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) struct perf_session *session = container_of(header, struct perf_session, header);
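	/*
	 * On-disk layout used before the pid field was added to
	 * struct perf_record_header_build_id.
	 */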
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) struct perf_event_header header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) u8 build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) char filename[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) } old_bev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) struct perf_record_header_build_id bev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) char filename[PATH_MAX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) u64 limit = offset + size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) while (offset < limit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) ssize_t len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) if (header->needs_swap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) perf_event_header__bswap(&old_bev.header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) len = old_bev.header.size - sizeof(old_bev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) if (readn(input, filename, len) != len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) bev.header = old_bev.header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142)
		/*
		 * The pid is the value missing from the old format, so we
		 * need to fill it in properly. The header.misc value gives
		 * us a nice hint.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) bev.pid = HOST_KERNEL_ID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) bev.pid = DEFAULT_GUEST_KERNEL_ID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) __event_process_build_id(&bev, filename, session);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) offset += bev.header.size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) static int perf_header__read_build_ids(struct perf_header *header,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) int input, u64 offset, u64 size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) struct perf_session *session = container_of(header, struct perf_session, header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) struct perf_record_header_build_id bev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) char filename[PATH_MAX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) u64 limit = offset + size, orig_offset = offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) int err = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) while (offset < limit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) ssize_t len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) if (readn(input, &bev, sizeof(bev)) != sizeof(bev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) if (header->needs_swap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) perf_event_header__bswap(&bev.header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) len = bev.header.size - sizeof(bev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) if (readn(input, filename, len) != len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) * The a1645ce1 changeset:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) * "perf: 'perf kvm' tool for monitoring guest performance from host"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) * Added a field to struct perf_record_header_build_id that broke the file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) * format.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) * Since the kernel build-id is the first entry, process the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) * table using the old format if the well known
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) * '[kernel.kallsyms]' string for the kernel build-id has the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) * first 4 characters chopped off (where the pid_t sits).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) __event_process_build_id(&bev, filename, session);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) offset += bev.header.size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) /* Macro for features that simply need to read and store a string. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) #define FEAT_PROCESS_STR_FUN(__feat, __feat_env) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) static int process_##__feat(struct feat_fd *ff, void *data __maybe_unused) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) {\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) ff->ph->env.__feat_env = do_read_string(ff); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) return ff->ph->env.__feat_env ? 0 : -ENOMEM; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) FEAT_PROCESS_STR_FUN(hostname, hostname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) FEAT_PROCESS_STR_FUN(osrelease, os_release);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) FEAT_PROCESS_STR_FUN(version, version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) FEAT_PROCESS_STR_FUN(arch, arch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) FEAT_PROCESS_STR_FUN(cpudesc, cpu_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) FEAT_PROCESS_STR_FUN(cpuid, cpuid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) static int process_tracing_data(struct feat_fd *ff, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) ssize_t ret = trace_report(ff->fd, data, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) return ret < 0 ? -1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) static int process_build_id(struct feat_fd *ff, void *data __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) if (perf_header__read_build_ids(ff->ph, ff->fd, ff->offset, ff->size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) pr_debug("Failed to read buildids, continuing...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) static int process_nrcpus(struct feat_fd *ff, void *data __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) u32 nr_cpus_avail, nr_cpus_online;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) ret = do_read_u32(ff, &nr_cpus_avail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) ret = do_read_u32(ff, &nr_cpus_online);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) ff->ph->env.nr_cpus_avail = (int)nr_cpus_avail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) ff->ph->env.nr_cpus_online = (int)nr_cpus_online;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) static int process_total_mem(struct feat_fd *ff, void *data __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) u64 total_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) ret = do_read_u64(ff, &total_mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) ff->ph->env.total_mem = (unsigned long long)total_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) static struct evsel *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) perf_evlist__find_by_index(struct evlist *evlist, int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) struct evsel *evsel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) evlist__for_each_entry(evlist, evsel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) if (evsel->idx == idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) return evsel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) perf_evlist__set_event_name(struct evlist *evlist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) struct evsel *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) struct evsel *evsel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) if (!event->name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) evsel = perf_evlist__find_by_index(evlist, event->idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) if (!evsel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) if (evsel->name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) evsel->name = strdup(event->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) process_event_desc(struct feat_fd *ff, void *data __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) struct perf_session *session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) struct evsel *evsel, *events = read_event_desc(ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) if (!events)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) session = container_of(ff->ph, struct perf_session, header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) if (session->data->is_pipe) {
		/*
		 * Save events for reading later by print_event_desc,
		 * since they can't be read again in pipe mode.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) ff->events = events;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) for (evsel = events; evsel->core.attr.size; evsel++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) perf_evlist__set_event_name(session->evlist, evsel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) if (!session->data->is_pipe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) free_event_desc(events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) static int process_cmdline(struct feat_fd *ff, void *data __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) char *str, *cmdline = NULL, **argv = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) u32 nr, i, len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) if (do_read_u32(ff, &nr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) ff->ph->env.nr_cmdline = nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) cmdline = zalloc(ff->size + nr + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) if (!cmdline)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) argv = zalloc(sizeof(char *) * (nr + 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) if (!argv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343)
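	/*
	 * All argument strings are packed back to back into the single
	 * cmdline buffer; the argv[] entries point into it.
	 */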
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) for (i = 0; i < nr; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) str = do_read_string(ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) if (!str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) argv[i] = cmdline + len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) memcpy(argv[i], str, strlen(str) + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) len += strlen(str) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) free(str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) ff->ph->env.cmdline = cmdline;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) ff->ph->env.cmdline_argv = (const char **) argv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) free(argv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) free(cmdline);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) u32 nr, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) char *str;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) struct strbuf sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) int cpu_nr = ff->ph->env.nr_cpus_avail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) u64 size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) struct perf_header *ph = ff->ph;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) bool do_core_id_test = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) if (!ph->env.cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377)
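	/*
	 * 'size' tracks how many bytes of this feature have been consumed,
	 * so we can tell whether the optional core/socket id and die id
	 * sections are present in older headers.
	 */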
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) if (do_read_u32(ff, &nr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) goto free_cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) ph->env.nr_sibling_cores = nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) size += sizeof(u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) if (strbuf_init(&sb, 128) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) goto free_cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) for (i = 0; i < nr; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) str = do_read_string(ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) if (!str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390)
		/* include a NUL terminator at the end */
		if (strbuf_add(&sb, str, strlen(str) + 1) < 0) {
			free(str);
			goto error;
		}
		size += string_size(str);
		free(str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) ph->env.sibling_cores = strbuf_detach(&sb, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) if (do_read_u32(ff, &nr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) ph->env.nr_sibling_threads = nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) size += sizeof(u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) for (i = 0; i < nr; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) str = do_read_string(ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) if (!str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409)
		/* include a NUL terminator at the end */
		if (strbuf_add(&sb, str, strlen(str) + 1) < 0) {
			free(str);
			goto error;
		}
		size += string_size(str);
		free(str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) ph->env.sibling_threads = strbuf_detach(&sb, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417)
	/*
	 * The header may come from an old perf binary,
	 * which doesn't include core id and socket id information.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) if (ff->size <= size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) zfree(&ph->env.cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426)
	/*
	 * On s390 the socket_id number is not related to the number of CPUs.
	 * The socket_id number might be higher than the number of CPUs.
	 * This depends on the configuration.
	 * AArch64 behaves the same way.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) if (ph->env.arch && (!strncmp(ph->env.arch, "s390", 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) || !strncmp(ph->env.arch, "aarch64", 7)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) do_core_id_test = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) for (i = 0; i < (u32)cpu_nr; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) if (do_read_u32(ff, &nr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) goto free_cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) ph->env.cpu[i].core_id = nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) size += sizeof(u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) if (do_read_u32(ff, &nr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) goto free_cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) if (do_core_id_test && nr != (u32)-1 && nr > (u32)cpu_nr) {
			pr_debug("socket_id number is too big. "
				 "You may need to upgrade the perf tool.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) goto free_cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) ph->env.cpu[i].socket_id = nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) size += sizeof(u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455)
	/*
	 * The header may come from an old perf binary,
	 * which doesn't include die information.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) if (ff->size <= size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) if (do_read_u32(ff, &nr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) ph->env.nr_sibling_dies = nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) size += sizeof(u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) for (i = 0; i < nr; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) str = do_read_string(ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) if (!str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473)
		/* include a NUL terminator at the end */
		if (strbuf_add(&sb, str, strlen(str) + 1) < 0) {
			free(str);
			goto error;
		}
		size += string_size(str);
		free(str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) ph->env.sibling_dies = strbuf_detach(&sb, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) for (i = 0; i < (u32)cpu_nr; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) if (do_read_u32(ff, &nr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) goto free_cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) ph->env.cpu[i].die_id = nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) strbuf_release(&sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) free_cpu:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) zfree(&ph->env.cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) static int process_numa_topology(struct feat_fd *ff, void *data __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) struct numa_node *nodes, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) u32 nr, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) char *str;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) /* nr nodes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) if (do_read_u32(ff, &nr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) nodes = zalloc(sizeof(*nodes) * nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) if (!nodes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511)
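	/*
	 * Each node record: node number, total and free memory in kB,
	 * followed by a cpu list string.
	 */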
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) for (i = 0; i < nr; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) n = &nodes[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) /* node number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) if (do_read_u32(ff, &n->node))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) if (do_read_u64(ff, &n->mem_total))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) if (do_read_u64(ff, &n->mem_free))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) str = do_read_string(ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) if (!str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528)
		n->map = perf_cpu_map__new(str);
		if (!n->map) {
			free(str);
			goto error;
		}

		free(str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) ff->ph->env.nr_numa_nodes = nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) ff->ph->env.numa_nodes = nodes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) free(nodes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) static int process_pmu_mappings(struct feat_fd *ff, void *data __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) u32 pmu_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) u32 type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) struct strbuf sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) if (do_read_u32(ff, &pmu_num))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) if (!pmu_num) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) pr_debug("pmu mappings not available\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) ff->ph->env.nr_pmu_mappings = pmu_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) if (strbuf_init(&sb, 128) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) while (pmu_num) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) if (do_read_u32(ff, &type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) name = do_read_string(ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) if (!name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570)
		if (strbuf_addf(&sb, "%u:%s", type, name) < 0) {
			free(name);
			goto error;
		}
		/* include a NUL terminator at the end */
		if (strbuf_add(&sb, "", 1) < 0) {
			free(name);
			goto error;
		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) if (!strcmp(name, "msr"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) ff->ph->env.msr_pmu_type = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) free(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) pmu_num--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) ff->ph->env.pmu_mappings = strbuf_detach(&sb, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) strbuf_release(&sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590)
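/*
 * HEADER_GROUP_DESC: a u32 group count followed, per group, by a name
 * string, the leader's evsel index and the number of members.  The
 * descriptors are then used to re-link leader/member relationships on
 * the session's evlist ("{anon_group}" marks an unnamed group).
 */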
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) static int process_group_desc(struct feat_fd *ff, void *data __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) 	int ret = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) u32 i, nr, nr_groups;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) struct perf_session *session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) struct evsel *evsel, *leader = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) struct group_desc {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) u32 leader_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) u32 nr_members;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) } *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) if (do_read_u32(ff, &nr_groups))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) ff->ph->env.nr_groups = nr_groups;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) if (!nr_groups) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) pr_debug("group desc not available\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) desc = calloc(nr_groups, sizeof(*desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) if (!desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) for (i = 0; i < nr_groups; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) desc[i].name = do_read_string(ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) if (!desc[i].name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) if (do_read_u32(ff, &desc[i].leader_idx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) if (do_read_u32(ff, &desc[i].nr_members))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) * Rebuild group relationship based on the group_desc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) session = container_of(ff->ph, struct perf_session, header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) session->evlist->nr_groups = nr_groups;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) i = nr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) evlist__for_each_entry(session->evlist, evsel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) 		if (i < nr_groups && evsel->idx == (int) desc[i].leader_idx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) evsel->leader = evsel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) /* {anon_group} is a dummy name */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) if (strcmp(desc[i].name, "{anon_group}")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) evsel->group_name = desc[i].name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) desc[i].name = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) evsel->core.nr_members = desc[i].nr_members;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) 			if (nr > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) pr_debug("invalid group desc\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) leader = evsel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) nr = evsel->core.nr_members - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) } else if (nr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) /* This is a group member */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) evsel->leader = leader;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) nr--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) if (i != nr_groups || nr != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) pr_debug("invalid group desc\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) for (i = 0; i < nr_groups; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) zfree(&desc[i].name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) free(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674)
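/*
 * HEADER_AUXTRACE: the auxtrace index; parsing is delegated to
 * auxtrace_index__process(), which honours the file's endianness.
 */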
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) static int process_auxtrace(struct feat_fd *ff, void *data __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) struct perf_session *session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) session = container_of(ff->ph, struct perf_session, header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) err = auxtrace_index__process(ff->fd, ff->size, session,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) ff->ph->needs_swap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) pr_err("Failed to process auxtrace index\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688)
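/*
 * HEADER_CACHE (version 1): a u32 cache-level count followed, per level,
 * by four u32s (level, line_size, sets, ways) and three strings (type,
 * size, map).  The resulting array is stored in env.caches.
 */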
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) static int process_cache(struct feat_fd *ff, void *data __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) struct cpu_cache_level *caches;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) u32 cnt, i, version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) if (do_read_u32(ff, &version))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) if (version != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) if (do_read_u32(ff, &cnt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) caches = zalloc(sizeof(*caches) * cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) if (!caches)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) for (i = 0; i < cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) struct cpu_cache_level c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) #define _R(v)						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) 	if (do_read_u32(ff, &c.v))			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) 		goto out_free_caches;			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) _R(level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) _R(line_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) _R(sets)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) _R(ways)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) #undef _R
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) #define _R(v) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) c.v = do_read_string(ff); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) if (!c.v) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) goto out_free_caches;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) _R(type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) _R(size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) _R(map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) #undef _R
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) caches[i] = c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) ff->ph->env.caches = caches;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) ff->ph->env.caches_cnt = cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) out_free_caches:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) free(caches);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740)
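/*
 * HEADER_SAMPLE_TIME: two u64 timestamps, the time of the first and of
 * the last sample in the file, stored on the session's evlist.
 */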
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) static int process_sample_time(struct feat_fd *ff, void *data __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) struct perf_session *session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) u64 first_sample_time, last_sample_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) session = container_of(ff->ph, struct perf_session, header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) ret = do_read_u64(ff, &first_sample_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) ret = do_read_u64(ff, &last_sample_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) session->evlist->first_sample_time = first_sample_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) session->evlist->last_sample_time = last_sample_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761)
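/*
 * HEADER_MEM_TOPOLOGY (version 1): the memory block size and the node
 * count, then per node its index, size and a bitmap of the memory
 * blocks present on that node.  Stored in env.memory_*.
 */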
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) static int process_mem_topology(struct feat_fd *ff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) void *data __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) struct memory_node *nodes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) u64 version, i, nr, bsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) int ret = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) if (do_read_u64(ff, &version))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) if (version != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) if (do_read_u64(ff, &bsize))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) if (do_read_u64(ff, &nr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) nodes = zalloc(sizeof(*nodes) * nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) if (!nodes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) for (i = 0; i < nr; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) struct memory_node n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) #define _R(v) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) if (do_read_u64(ff, &n.v)) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) goto out; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) _R(node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) _R(size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) #undef _R
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) if (do_read_bitmap(ff, &n.set, &n.size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) nodes[i] = n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) ff->ph->env.memory_bsize = bsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) ff->ph->env.memory_nodes = nodes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) ff->ph->env.nr_memory_nodes = nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) free(nodes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813)
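/*
 * HEADER_CLOCKID: a single u64, the resolution in nanoseconds of the
 * clock used while recording.
 */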
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) static int process_clockid(struct feat_fd *ff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) void *data __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) if (do_read_u64(ff, &ff->ph->env.clock.clockid_res_ns))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822)
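/*
 * HEADER_CLOCK_DATA: version (must be 1), the clockid used for
 * recording, and a pair of reference timestamps (wall-clock TOD and the
 * recording clock) that allow converting perf timestamps to wall-clock
 * time.
 */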
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) static int process_clock_data(struct feat_fd *ff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) void *_data __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) u32 data32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) u64 data64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) /* version */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) if (do_read_u32(ff, &data32))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) if (data32 != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) /* clockid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) if (do_read_u32(ff, &data32))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) ff->ph->env.clock.clockid = data32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) /* TOD ref time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) if (do_read_u64(ff, &data64))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) ff->ph->env.clock.tod_ns = data64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) /* clockid ref time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) if (do_read_u64(ff, &data64))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) ff->ph->env.clock.clockid_ns = data64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) ff->ph->env.clock.enabled = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856)
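/*
 * HEADER_DIR_FORMAT: the directory format version; only valid when the
 * perf data is stored as a directory rather than a single file.
 */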
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) static int process_dir_format(struct feat_fd *ff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) void *_data __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) struct perf_session *session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) struct perf_data *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) session = container_of(ff->ph, struct perf_session, header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) data = session->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) if (WARN_ON(!perf_data__is_dir(data)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) return do_read_u64(ff, &data->dir.version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871)
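/*
 * HEADER_BPF_PROG_INFO: a u32 count of BPF programs, then per program
 * the lengths, arrays bitmap and payload of a bpf_prog_info_linear
 * record.  File offsets are translated back into pointers and the
 * entries are inserted into env.bpf_progs under its write lock.
 * Cross-endian files are skipped; without libbpf the stub is a no-op.
 */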
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) #ifdef HAVE_LIBBPF_SUPPORT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) struct bpf_prog_info_linear *info_linear;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) struct bpf_prog_info_node *info_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) struct perf_env *env = &ff->ph->env;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) u32 count, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) int err = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) if (ff->ph->needs_swap) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) 		pr_warning("interpreting bpf_prog_info from systems with a different endianness is not yet supported\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) if (do_read_u32(ff, &count))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) down_write(&env->bpf_progs.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) for (i = 0; i < count; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) u32 info_len, data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) info_linear = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) info_node = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) if (do_read_u32(ff, &info_len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) if (do_read_u32(ff, &data_len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) if (info_len > sizeof(struct bpf_prog_info)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) pr_warning("detected invalid bpf_prog_info\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) info_linear = malloc(sizeof(struct bpf_prog_info_linear) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) data_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) if (!info_linear)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) info_linear->info_len = sizeof(struct bpf_prog_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) info_linear->data_len = data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) if (do_read_u64(ff, (u64 *)(&info_linear->arrays)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) if (__do_read(ff, &info_linear->info, info_len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) if (info_len < sizeof(struct bpf_prog_info))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) memset(((void *)(&info_linear->info)) + info_len, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) sizeof(struct bpf_prog_info) - info_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) if (__do_read(ff, info_linear->data, data_len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) info_node = malloc(sizeof(struct bpf_prog_info_node));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) if (!info_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) /* after reading from file, translate offset to address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) bpf_program__bpil_offs_to_addr(info_linear);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) info_node->info_linear = info_linear;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) perf_env__insert_bpf_prog_info(env, info_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) up_write(&env->bpf_progs.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) free(info_linear);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) free(info_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) up_write(&env->bpf_progs.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) #else // HAVE_LIBBPF_SUPPORT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) static int process_bpf_prog_info(struct feat_fd *ff __maybe_unused, void *data __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) #endif // HAVE_LIBBPF_SUPPORT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947)
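/*
 * HEADER_BPF_BTF: a u32 count followed, per entry, by a BTF id, a data
 * size and the raw BTF blob, inserted into the perf_env under the same
 * write lock as the BPF program info.
 */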
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) struct perf_env *env = &ff->ph->env;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) struct btf_node *node = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) u32 count, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) int err = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) if (ff->ph->needs_swap) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) 		pr_warning("interpreting btf from systems with a different endianness is not yet supported\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) if (do_read_u32(ff, &count))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) down_write(&env->bpf_progs.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) for (i = 0; i < count; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) u32 id, data_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) if (do_read_u32(ff, &id))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) if (do_read_u32(ff, &data_size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) node = malloc(sizeof(struct btf_node) + data_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) if (!node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) node->id = id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) node->data_size = data_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) if (__do_read(ff, node->data, data_size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) perf_env__insert_btf(env, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) node = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) up_write(&env->bpf_progs.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) free(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993)
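/*
 * HEADER_COMPRESSED: five u32s describing how the data was compressed:
 * version, type, level, ratio and the mmap length used for compression.
 */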
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) static int process_compressed(struct feat_fd *ff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) void *data __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) if (do_read_u32(ff, &(ff->ph->env.comp_ver)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) if (do_read_u32(ff, &(ff->ph->env.comp_type)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) if (do_read_u32(ff, &(ff->ph->env.comp_level)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) if (do_read_u32(ff, &(ff->ph->env.comp_ratio)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) if (do_read_u32(ff, &(ff->ph->env.comp_mmap_len)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014)
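/*
 * HEADER_CPU_PMU_CAPS: a u32 count of capabilities followed by
 * name/value string pairs, flattened into env.cpu_pmu_caps as
 * NUL-terminated "name=value" entries.  The "branches" capability is
 * also parsed into env.max_branches.
 */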
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) static int process_cpu_pmu_caps(struct feat_fd *ff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) void *data __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) char *name, *value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) struct strbuf sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) u32 nr_caps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) if (do_read_u32(ff, &nr_caps))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) if (!nr_caps) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) pr_debug("cpu pmu capabilities not available\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) ff->ph->env.nr_cpu_pmu_caps = nr_caps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) if (strbuf_init(&sb, 128) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) while (nr_caps--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) name = do_read_string(ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) if (!name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) value = do_read_string(ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) if (!value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) goto free_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) if (strbuf_addf(&sb, "%s=%s", name, value) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) goto free_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) /* include a NULL character at the end */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) if (strbuf_add(&sb, "", 1) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) goto free_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) if (!strcmp(name, "branches"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) ff->ph->env.max_branches = atoi(value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) free(value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) free(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) ff->ph->env.cpu_pmu_caps = strbuf_detach(&sb, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) free_value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) free(value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) free_name:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) free(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) strbuf_release(&sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068)
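/*
 * FEAT_OPR and FEAT_OPN both wire up the write/print/process handlers
 * for a header feature; the only difference is that FEAT_OPR also sets
 * .synthesize, marking the feature as one that can be emitted as a
 * synthesized event (see perf_event__synthesize_features()).
 */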
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) #define FEAT_OPR(n, func, __full_only) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) [HEADER_##n] = { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) .name = __stringify(n), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) .write = write_##func, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) .print = print_##func, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) .full_only = __full_only, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) .process = process_##func, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) .synthesize = true \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) #define FEAT_OPN(n, func, __full_only) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) [HEADER_##n] = { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) .name = __stringify(n), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) .write = write_##func, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) .print = print_##func, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) .full_only = __full_only, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) .process = process_##func \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) /* feature_ops not implemented: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) #define print_tracing_data NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) #define print_build_id NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) #define process_branch_stack NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) #define process_stat NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) // Only used in util/synthetic-events.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) FEAT_OPN(TRACING_DATA, tracing_data, false),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) FEAT_OPN(BUILD_ID, build_id, false),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) FEAT_OPR(HOSTNAME, hostname, false),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) FEAT_OPR(OSRELEASE, osrelease, false),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) FEAT_OPR(VERSION, version, false),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) FEAT_OPR(ARCH, arch, false),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) FEAT_OPR(NRCPUS, nrcpus, false),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) FEAT_OPR(CPUDESC, cpudesc, false),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) FEAT_OPR(CPUID, cpuid, false),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) FEAT_OPR(TOTAL_MEM, total_mem, false),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) FEAT_OPR(EVENT_DESC, event_desc, false),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) FEAT_OPR(CMDLINE, cmdline, false),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) FEAT_OPR(CPU_TOPOLOGY, cpu_topology, true),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) FEAT_OPR(NUMA_TOPOLOGY, numa_topology, true),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) FEAT_OPN(BRANCH_STACK, branch_stack, false),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) FEAT_OPR(PMU_MAPPINGS, pmu_mappings, false),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) FEAT_OPR(GROUP_DESC, group_desc, false),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) FEAT_OPN(AUXTRACE, auxtrace, false),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) FEAT_OPN(STAT, stat, false),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) FEAT_OPN(CACHE, cache, true),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) FEAT_OPR(SAMPLE_TIME, sample_time, false),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) FEAT_OPR(MEM_TOPOLOGY, mem_topology, true),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) FEAT_OPR(CLOCKID, clockid, false),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) FEAT_OPN(DIR_FORMAT, dir_format, false),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) FEAT_OPR(BPF_PROG_INFO, bpf_prog_info, false),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) FEAT_OPR(BPF_BTF, bpf_btf, false),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) FEAT_OPR(COMPRESSED, compressed, false),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) FEAT_OPR(CPU_PMU_CAPS, cpu_pmu_caps, false),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) FEAT_OPR(CLOCK_DATA, clock_data, false),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) struct header_print_data {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) FILE *fp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) bool full; /* extended list of headers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) static int perf_file_section__fprintf_info(struct perf_file_section *section,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) struct perf_header *ph,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) int feat, int fd, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) struct header_print_data *hd = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) struct feat_fd ff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) "%d, continuing...\n", section->offset, feat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) if (feat >= HEADER_LAST_FEATURE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) pr_warning("unknown feature %d\n", feat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) if (!feat_ops[feat].print)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) ff = (struct feat_fd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) .fd = fd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) .ph = ph,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) if (!feat_ops[feat].full_only || hd->full)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) feat_ops[feat].print(&ff, hd->fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) fprintf(hd->fp, "# %s info available, use -I to display\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) feat_ops[feat].name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167)
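/*
 * Print a textual summary of the file header: capture time, header
 * version, data/feature offsets, each feature's printed form and, for
 * non-pipe data, the list of features missing from the file.
 */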
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) struct header_print_data hd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) struct perf_header *header = &session->header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) int fd = perf_data__fd(session->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) struct stat st;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) time_t stctime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) int ret, bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) hd.fp = fp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) hd.full = full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) ret = fstat(fd, &st);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) if (ret == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) stctime = st.st_mtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) fprintf(fp, "# captured on : %s", ctime(&stctime));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) fprintf(fp, "# header version : %u\n", header->version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) fprintf(fp, "# data offset : %" PRIu64 "\n", header->data_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) fprintf(fp, "# data size : %" PRIu64 "\n", header->data_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) fprintf(fp, "# feat offset : %" PRIu64 "\n", header->feat_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) perf_header__process_sections(header, fd, &hd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) perf_file_section__fprintf_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) if (session->data->is_pipe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) fprintf(fp, "# missing features: ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) for_each_clear_bit(bit, header->adds_features, HEADER_LAST_FEATURE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) if (bit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) fprintf(fp, "%s ", feat_ops[bit].name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) fprintf(fp, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207)
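/*
 * Write the body of one feature section at the current file offset,
 * recording its offset and size in *p.  On failure the file position is
 * rewound so that nothing of the failed feature remains and the caller
 * can clear the feature bit.
 */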
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) static int do_write_feat(struct feat_fd *ff, int type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) struct perf_file_section **p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) struct evlist *evlist)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) if (perf_header__has_feat(ff->ph, type)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) if (!feat_ops[type].write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) (*p)->offset = lseek(ff->fd, 0, SEEK_CUR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) err = feat_ops[type].write(ff, evlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) pr_debug("failed to write feature %s\n", feat_ops[type].name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) /* undo anything written */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) lseek(ff->fd, (*p)->offset, SEEK_SET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) (*p)->size = lseek(ff->fd, 0, SEEK_CUR) - (*p)->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) (*p)++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238)
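/*
 * Write all selected feature sections: skip over the space reserved for
 * the per-feature section table, write each feature body (clearing the
 * bit of any feature that fails), then seek back and write the table
 * itself.
 */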
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) static int perf_header__adds_write(struct perf_header *header,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) struct evlist *evlist, int fd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) int nr_sections;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) struct feat_fd ff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) struct perf_file_section *feat_sec, *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) int sec_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) u64 sec_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) int feat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) ff = (struct feat_fd){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) .fd = fd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) .ph = header,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) if (!nr_sections)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) feat_sec = p = calloc(nr_sections, sizeof(*feat_sec));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) if (feat_sec == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) sec_size = sizeof(*feat_sec) * nr_sections;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) sec_start = header->feat_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) lseek(fd, sec_start + sec_size, SEEK_SET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) if (do_write_feat(&ff, feat, &p, evlist))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) perf_header__clear_feat(header, feat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) lseek(fd, sec_start, SEEK_SET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) 	 * We may write more section entries than needed if a feature was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) 	 * dropped, but that is okay: the reader skips the missing entries.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) err = do_write(&ff, feat_sec, sec_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) pr_debug("failed to write feature section\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) free(feat_sec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284)
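/*
 * Pipe-mode output has no feature sections: only the magic and the
 * header size are written up front.
 */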
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) int perf_header__write_pipe(int fd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) struct perf_pipe_file_header f_header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) struct feat_fd ff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) ff = (struct feat_fd){ .fd = fd };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) f_header = (struct perf_pipe_file_header){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) .magic = PERF_MAGIC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) .size = sizeof(f_header),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) err = do_write(&ff, &f_header, sizeof(f_header));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) pr_debug("failed to write perf pipe header\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306)
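/*
 * Write a perf.data file header: the per-evsel sample ids first, then
 * the table of perf_file_attr entries, optionally (at_exit) the feature
 * sections, and finally the perf_file_header itself at offset 0 with
 * the resulting offsets and sizes filled in.
 */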
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) int perf_session__write_header(struct perf_session *session,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) struct evlist *evlist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) int fd, bool at_exit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) struct perf_file_header f_header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) struct perf_file_attr f_attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) struct perf_header *header = &session->header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) struct evsel *evsel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) struct feat_fd ff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) u64 attr_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) ff = (struct feat_fd){ .fd = fd};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) lseek(fd, sizeof(f_header), SEEK_SET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) evlist__for_each_entry(session->evlist, evsel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) evsel->id_offset = lseek(fd, 0, SEEK_CUR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) err = do_write(&ff, evsel->core.id, evsel->core.ids * sizeof(u64));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) pr_debug("failed to write perf header\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) attr_offset = lseek(ff.fd, 0, SEEK_CUR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) evlist__for_each_entry(evlist, evsel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) f_attr = (struct perf_file_attr){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) .attr = evsel->core.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) .ids = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) .offset = evsel->id_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) .size = evsel->core.ids * sizeof(u64),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) err = do_write(&ff, &f_attr, sizeof(f_attr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) pr_debug("failed to write perf header attribute\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) if (!header->data_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) header->data_offset = lseek(fd, 0, SEEK_CUR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) header->feat_offset = header->data_offset + header->data_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) if (at_exit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) err = perf_header__adds_write(header, evlist, fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) f_header = (struct perf_file_header){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) .magic = PERF_MAGIC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) .size = sizeof(f_header),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) .attr_size = sizeof(f_attr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) .attrs = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) .offset = attr_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) .size = evlist->core.nr_entries * sizeof(f_attr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) .data = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) .offset = header->data_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) .size = header->data_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) /* event_types is ignored, store zeros */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) lseek(fd, 0, SEEK_SET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) err = do_write(&ff, &f_header, sizeof(f_header));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) pr_debug("failed to write perf header\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) lseek(fd, header->data_offset + header->data_size, SEEK_SET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385)
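/*
 * Read @size bytes at the current offset and byte-swap them as an array
 * of u64s when the file was written with the opposite endianness.
 */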
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) static int perf_header__getbuffer64(struct perf_header *header,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) int fd, void *buf, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) if (readn(fd, buf, size) <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) if (header->needs_swap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) mem_bswap_64(buf, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397)
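/*
 * Load the feature section table from feat_offset and invoke @process
 * for every feature bit set in the header's adds_features bitmap.
 */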
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) int perf_header__process_sections(struct perf_header *header, int fd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) int (*process)(struct perf_file_section *section,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) struct perf_header *ph,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) int feat, int fd, void *data))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) struct perf_file_section *feat_sec, *sec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) int nr_sections;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) int sec_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) int feat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) if (!nr_sections)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) if (!feat_sec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) sec_size = sizeof(*feat_sec) * nr_sections;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) lseek(fd, header->feat_offset, SEEK_SET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) err = process(sec++, header, feat, fd, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) free(feat_sec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436)
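/* Zero-terminated table of on-disk perf_event_attr sizes, one per ABI revision. */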
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) static const int attr_file_abi_sizes[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) [0] = PERF_ATTR_SIZE_VER0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) [1] = PERF_ATTR_SIZE_VER1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) [2] = PERF_ATTR_SIZE_VER2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) [3] = PERF_ATTR_SIZE_VER3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) [4] = PERF_ATTR_SIZE_VER4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) * In the legacy file format, the magic number does not encode endianness;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) * hdr_sz was used for that instead. But given that hdr_sz can vary across
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) * ABI revisions, we need to try all the known sizes, in both endiannesses,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) * to detect the endianness of the file.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) uint64_t ref_size, attr_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) for (i = 0 ; attr_file_abi_sizes[i]; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) ref_size = attr_file_abi_sizes[i]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) + sizeof(struct perf_file_section);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) if (hdr_sz != ref_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) attr_size = bswap_64(hdr_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) if (attr_size != ref_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) ph->needs_swap = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) pr_debug("ABI%d perf.data file detected, need_swap=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) ph->needs_swap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) /* could not determine endianness */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475)
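/* Size of the ABI0 pipe header: a u64 magic followed by a u64 size. */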
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) #define PERF_PIPE_HDR_VER0 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) static const size_t attr_pipe_abi_sizes[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) [0] = PERF_PIPE_HDR_VER0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) * In the legacy pipe format, there is an implicit assumption that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) * endianness of the host recording the samples and the host parsing them is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) * the same. This is not always the case, given that the pipe output may be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) * redirected into a file and analyzed on a different machine with a possibly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) * different endianness and perf_event ABI revision in the perf tool itself.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) u64 attr_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) for (i = 0 ; attr_pipe_abi_sizes[i]; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) if (hdr_sz != attr_pipe_abi_sizes[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) attr_size = bswap_64(hdr_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) if (attr_size != hdr_sz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) ph->needs_swap = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) pr_debug("Pipe ABI%d perf.data file detected\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) bool is_perf_magic(u64 magic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) if (!memcmp(&magic, __perf_magic1, sizeof(magic))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) || magic == __perf_magic2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) || magic == __perf_magic2_sw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518)
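/*
 * Figure out the header version and endianness from the magic value, falling
 * back to the header-size heuristics above for the legacy (version 1) format.
 */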
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) static int check_magic_endian(u64 magic, uint64_t hdr_sz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) bool is_pipe, struct perf_header *ph)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) /* check for legacy format */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) ret = memcmp(&magic, __perf_magic1, sizeof(magic));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) if (ret == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) ph->version = PERF_HEADER_VERSION_1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) pr_debug("legacy perf.data format\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) if (is_pipe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) return try_all_pipe_abis(hdr_sz, ph);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) return try_all_file_abis(hdr_sz, ph);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) * The new magic number serves two purposes:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) * - a unique number identifying actual perf.data files
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) * - encoding the endianness of the file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) ph->version = PERF_HEADER_VERSION_2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) /* check magic number with one endianness */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) if (magic == __perf_magic2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) /* check magic number with opposite endianness */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) if (magic != __perf_magic2_sw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) ph->needs_swap = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553)
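/*
 * Read and validate the on-disk file header, fixing up endianness and coping
 * with older headers that predate the adds_features bitmap.
 */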
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) int perf_file_header__read(struct perf_file_header *header,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) struct perf_header *ph, int fd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) ssize_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) lseek(fd, 0, SEEK_SET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) ret = readn(fd, header, sizeof(*header));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) if (ret <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) if (check_magic_endian(header->magic,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) header->attr_size, false, ph) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) pr_debug("magic/endian check failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) if (ph->needs_swap) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) mem_bswap_64(header, offsetof(struct perf_file_header,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) adds_features));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) if (header->size != sizeof(*header)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) /* Support the previous format */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) if (header->size == offsetof(typeof(*header), adds_features))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) } else if (ph->needs_swap) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) * feature bitmap is declared as an array of unsigned longs --
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) * not good since its size can differ between the host that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) * generated the data file and the host analyzing the file.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) * We need to handle endianness, but we don't know the size of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) * the unsigned long where the file was generated. Take a best
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) * guess at determining it: try a 64-bit swap first (i.e., file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) * created on a 64-bit host), and check if the hostname feature
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) * bit is set (this feature bit is forced on as of fbe96f2).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) * If the bit is not, undo the 64-bit swap and try a 32-bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) * swap. If the hostname bit is still not set (e.g., older data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) * file), punt and fall back to the original behavior --
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) * clearing all feature bits and setting buildid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) mem_bswap_64(&header->adds_features,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) BITS_TO_U64(HEADER_FEAT_BITS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) /* unswap as u64 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) mem_bswap_64(&header->adds_features,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) BITS_TO_U64(HEADER_FEAT_BITS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) /* unswap as u32 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) mem_bswap_32(&header->adds_features,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) BITS_TO_U32(HEADER_FEAT_BITS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) set_bit(HEADER_BUILD_ID, header->adds_features);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) memcpy(&ph->adds_features, &header->adds_features,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) sizeof(ph->adds_features));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) ph->data_offset = header->data.offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) ph->data_size = header->data.size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) ph->feat_offset = header->data.offset + header->data.size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625)
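/*
 * Callback for perf_header__process_sections(): seek to a feature section and
 * hand it to the feature's ->process() handler, skipping unknown features.
 */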
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) static int perf_file_section__process(struct perf_file_section *section,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) struct perf_header *ph,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) int feat, int fd, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) struct feat_fd fdd = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) .fd = fd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) .ph = ph,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) .size = section->size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) .offset = section->offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) "%d, continuing...\n", section->offset, feat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) if (feat >= HEADER_LAST_FEATURE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) pr_debug("unknown feature %d, continuing...\n", feat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) if (!feat_ops[feat].process)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) return feat_ops[feat].process(&fdd, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653)
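/*
 * Read the minimal pipe-mode header, detect its endianness and, when
 * repiping, forward the header to stdout.
 */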
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) struct perf_header *ph, int fd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) bool repipe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) struct feat_fd ff = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) .fd = STDOUT_FILENO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) .ph = ph,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) ssize_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) ret = readn(fd, header, sizeof(*header));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) if (ret <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) if (check_magic_endian(header->magic, header->size, true, ph) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) pr_debug("endian/magic failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) if (ph->needs_swap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) header->size = bswap_64(header->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) if (repipe && do_write(&ff, header, sizeof(*header)) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) static int perf_header__read_pipe(struct perf_session *session)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) struct perf_header *header = &session->header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) struct perf_pipe_file_header f_header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) if (perf_file_header__read_pipe(&f_header, header,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) perf_data__fd(session->data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) session->repipe) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) pr_debug("incompatible file format\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) return f_header.size == sizeof(f_header) ? 0 : -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696)
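/*
 * Read one on-disk perf_event_attr plus the perf_file_section describing
 * where its event IDs live. Smaller (older ABI) attr sizes are tolerated,
 * larger (newer ABI) ones are rejected.
 */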
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) static int read_attr(int fd, struct perf_header *ph,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) struct perf_file_attr *f_attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) struct perf_event_attr *attr = &f_attr->attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) size_t sz, left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) size_t our_sz = sizeof(f_attr->attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) ssize_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) memset(f_attr, 0, sizeof(*f_attr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) /* read minimal guaranteed structure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) ret = readn(fd, attr, PERF_ATTR_SIZE_VER0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) if (ret <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) pr_debug("cannot read %d bytes of header attr\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) PERF_ATTR_SIZE_VER0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) /* on file perf_event_attr size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) sz = attr->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) if (ph->needs_swap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) sz = bswap_32(sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) if (sz == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) /* assume ABI0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) sz = PERF_ATTR_SIZE_VER0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) } else if (sz > our_sz) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) pr_debug("file uses a more recent and unsupported ABI"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) " (%zu bytes extra)\n", sz - our_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) /* what we have not yet read and that we know about */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730) left = sz - PERF_ATTR_SIZE_VER0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) if (left) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) void *ptr = attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) ptr += PERF_ATTR_SIZE_VER0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) ret = readn(fd, ptr, left);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) /* read perf_file_section, ids are read in caller */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) return ret <= 0 ? -1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742)
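/*
 * Resolve the libtraceevent format for a tracepoint evsel and, if the evsel
 * has no name yet, name it "system:name".
 */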
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) static int perf_evsel__prepare_tracepoint_event(struct evsel *evsel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) struct tep_handle *pevent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746) struct tep_event *event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) char bf[128];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) /* already prepared */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750) if (evsel->tp_format)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) if (pevent == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) pr_debug("broken or missing trace data\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) event = tep_find_event(pevent, evsel->core.attr.config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) if (event == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760) pr_debug("cannot find event format for %d\n", (int)evsel->core.attr.config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) if (!evsel->name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766) evsel->name = strdup(bf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) if (evsel->name == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) evsel->tp_format = event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) static int perf_evlist__prepare_tracepoint_events(struct evlist *evlist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776) struct tep_handle *pevent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) struct evsel *pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) evlist__for_each_entry(evlist, pos) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) if (pos->core.attr.type == PERF_TYPE_TRACEPOINT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) perf_evsel__prepare_tracepoint_event(pos, pevent))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788)
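/*
 * Read a perf.data header: try the pipe-mode header first, then fall back to
 * the regular file header, building the session's evlist from the recorded
 * attributes and processing the feature sections.
 */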
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) int perf_session__read_header(struct perf_session *session)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791) struct perf_data *data = session->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) struct perf_header *header = &session->header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) struct perf_file_header f_header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794) struct perf_file_attr f_attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) u64 f_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796) int nr_attrs, nr_ids, i, j, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) int fd = perf_data__fd(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) session->evlist = evlist__new();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) if (session->evlist == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) session->evlist->env = &header->env;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) session->machines.host.env = &header->env;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) * We can read 'pipe' data events from a regular file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) * so check for the pipe header regardless of the source.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) err = perf_header__read_pipe(session);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) if (!err || (err && perf_data__is_pipe(data))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) data->is_pipe = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) if (perf_file_header__read(&f_header, header, fd) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) * Sanity check that perf.data was written cleanly; data size is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) * initialized to 0 and updated only if the on_exit function is run.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) * If data size is still 0 then the file contains only partial
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) * information. Just warn the user and process as much of it as we can.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) if (f_header.data.size == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) "Was the 'perf record' command properly terminated?\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828) data->file.path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831) if (f_header.attr_size == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832) pr_err("ERROR: The %s file's attr size field is 0 which is unexpected.\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833) "Was the 'perf record' command properly terminated?\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) data->file.path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) nr_attrs = f_header.attrs.size / f_header.attr_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) lseek(fd, f_header.attrs.offset, SEEK_SET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) for (i = 0; i < nr_attrs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) struct evsel *evsel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843) off_t tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845) if (read_attr(fd, header, &f_attr) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846) goto out_errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) if (header->needs_swap) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) f_attr.ids.size = bswap_64(f_attr.ids.size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) f_attr.ids.offset = bswap_64(f_attr.ids.offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) perf_event__attr_swap(&f_attr.attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854) tmp = lseek(fd, 0, SEEK_CUR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855) evsel = evsel__new(&f_attr.attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) if (evsel == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) goto out_delete_evlist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860) evsel->needs_swap = header->needs_swap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) * Add it to the evlist before perf_evsel__alloc_id() so that if that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863) * call fails, this entry also gets purged at evlist__delete().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865) evlist__add(session->evlist, evsel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) nr_ids = f_attr.ids.size / sizeof(u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869) * We don't have the cpu and thread maps on the header, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870) * for allocating the perf_sample_id table we fake 1 cpu and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871) * nr_ids threads.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) if (perf_evsel__alloc_id(&evsel->core, 1, nr_ids))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874) goto out_delete_evlist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) lseek(fd, f_attr.ids.offset, SEEK_SET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878) for (j = 0; j < nr_ids; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879) if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880) goto out_errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) perf_evlist__id_add(&session->evlist->core, &evsel->core, 0, j, f_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885) lseek(fd, tmp, SEEK_SET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888) perf_header__process_sections(header, fd, &session->tevent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889) perf_file_section__process);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891) if (perf_evlist__prepare_tracepoint_events(session->evlist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892) session->tevent.pevent))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893) goto out_delete_evlist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896) out_errno:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897) return -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899) out_delete_evlist:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) evlist__delete(session->evlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901) session->evlist = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904)
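/*
 * Handle a synthesized PERF_RECORD_HEADER_FEATURE event (pipe mode): run the
 * feature's ->process() handler on the embedded data and optionally print it.
 */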
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905) int perf_event__process_feature(struct perf_session *session,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906) union perf_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908) struct perf_tool *tool = session->tool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909) struct feat_fd ff = { .fd = 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910) struct perf_record_header_feature *fe = (struct perf_record_header_feature *)event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911) int type = fe->header.type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912) u64 feat = fe->feat_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914) if (type < 0 || type >= PERF_RECORD_HEADER_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) pr_warning("invalid record type %d in pipe-mode\n", type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) if (feat == HEADER_RESERVED || feat >= HEADER_LAST_FEATURE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919) pr_warning("invalid feature id %" PRI_lu64 " in pipe-mode\n", feat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) if (!feat_ops[feat].process)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926) ff.buf = (void *)fe->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) ff.size = event->header.size - sizeof(*fe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928) ff.ph = &session->header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) if (feat_ops[feat].process(&ff, NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933) if (!feat_ops[feat].print || !tool->show_feat_hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936) if (!feat_ops[feat].full_only ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) tool->show_feat_hdr >= SHOW_FEAT_HEADER_FULL_INFO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938) feat_ops[feat].print(&ff, stdout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) fprintf(stdout, "# %s info available, use -I to display\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) feat_ops[feat].name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946)
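/* Pretty-print the payload of a PERF_RECORD_EVENT_UPDATE event. */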
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947) size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949) struct perf_record_event_update *ev = &event->event_update;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) struct perf_record_event_update_scale *ev_scale;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951) struct perf_record_event_update_cpus *ev_cpus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952) struct perf_cpu_map *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953) size_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955) ret = fprintf(fp, "\n... id: %" PRI_lu64 "\n", ev->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) switch (ev->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958) case PERF_EVENT_UPDATE__SCALE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959) ev_scale = (struct perf_record_event_update_scale *)ev->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960) ret += fprintf(fp, "... scale: %f\n", ev_scale->scale);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) case PERF_EVENT_UPDATE__UNIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963) ret += fprintf(fp, "... unit: %s\n", ev->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965) case PERF_EVENT_UPDATE__NAME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966) ret += fprintf(fp, "... name: %s\n", ev->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) case PERF_EVENT_UPDATE__CPUS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) ev_cpus = (struct perf_record_event_update_cpus *)ev->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970) ret += fprintf(fp, "... ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972) map = cpu_map__new_data(&ev_cpus->cpus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973) if (map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) ret += cpu_map__fprintf(map, fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) ret += fprintf(fp, "failed to get cpus\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979) ret += fprintf(fp, "... unknown type\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985)
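/*
 * Handle a PERF_RECORD_HEADER_ATTR event: create an evsel from the attribute
 * and register the sample IDs that follow it in the event.
 */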
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986) int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987) union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988) struct evlist **pevlist)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990) u32 i, ids, n_ids;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991) struct evsel *evsel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992) struct evlist *evlist = *pevlist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994) if (evlist == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) *pevlist = evlist = evlist__new();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996) if (evlist == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000) evsel = evsel__new(&event->attr.attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) if (evsel == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004) evlist__add(evlist, evsel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006) ids = event->header.size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007) ids -= (void *)&event->attr.id - (void *)event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008) n_ids = ids / sizeof(u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010) * We don't have the cpu and thread maps on the header, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011) * for allocating the perf_sample_id table we fake 1 cpu and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012) * n_ids threads.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014) if (perf_evsel__alloc_id(&evsel->core, 1, n_ids))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017) for (i = 0; i < n_ids; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018) perf_evlist__id_add(&evlist->core, &evsel->core, 0, i, event->attr.id[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023)
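/*
 * Handle a PERF_RECORD_EVENT_UPDATE event: update the unit, name, scale or
 * CPU map of the evsel identified by ev->id.
 */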
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024) int perf_event__process_event_update(struct perf_tool *tool __maybe_unused,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025) union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026) struct evlist **pevlist)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028) struct perf_record_event_update *ev = &event->event_update;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029) struct perf_record_event_update_scale *ev_scale;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030) struct perf_record_event_update_cpus *ev_cpus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031) struct evlist *evlist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032) struct evsel *evsel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033) struct perf_cpu_map *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035) if (!pevlist || *pevlist == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038) evlist = *pevlist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040) evsel = perf_evlist__id2evsel(evlist, ev->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041) if (evsel == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044) switch (ev->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045) case PERF_EVENT_UPDATE__UNIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) evsel->unit = strdup(ev->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048) case PERF_EVENT_UPDATE__NAME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049) evsel->name = strdup(ev->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) case PERF_EVENT_UPDATE__SCALE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052) ev_scale = (struct perf_record_event_update_scale *)ev->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053) evsel->scale = ev_scale->scale;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055) case PERF_EVENT_UPDATE__CPUS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) ev_cpus = (struct perf_record_event_update_cpus *)ev->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) map = cpu_map__new_data(&ev_cpus->cpus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059) if (map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060) evsel->core.own_cpus = map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) pr_err("failed to get event_update cpus\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069)
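/*
 * Handle a PERF_RECORD_HEADER_TRACING_DATA event: parse the tracing data that
 * follows it in the stream and return the number of bytes consumed.
 */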
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070) int perf_event__process_tracing_data(struct perf_session *session,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071) union perf_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) ssize_t size_read, padding, size = event->tracing_data.size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074) int fd = perf_data__fd(session->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) char buf[BUFSIZ];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) * The pipe fd is already in proper place and in any case
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079) * we can't move it, and we'd screw the case where we read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080) * 'pipe' data from regular file. The trace_report reads
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081) * data from 'fd' so we need to set it directly behind the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) * event, where the tracing data starts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084) if (!perf_data__is_pipe(session->data)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085) off_t offset = lseek(fd, 0, SEEK_CUR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087) /* setup for reading amidst mmap */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088) lseek(fd, offset + sizeof(struct perf_record_header_tracing_data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089) SEEK_SET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092) size_read = trace_report(fd, &session->tevent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093) session->repipe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094) padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096) if (readn(fd, buf, padding) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097) pr_err("%s: reading input file", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100) if (session->repipe) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101) int retw = write(STDOUT_FILENO, buf, padding);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102) if (retw <= 0 || retw != padding) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103) pr_err("%s: repiping tracing data padding", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108) if (size_read + padding != size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109) pr_err("%s: tracing data size mismatch", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113) perf_evlist__prepare_tracepoint_events(session->evlist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114) session->tevent.pevent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116) return size_read + padding;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) int perf_event__process_build_id(struct perf_session *session,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120) union perf_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122) __event_process_build_id(&event->build_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123) event->build_id.filename,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124) session);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126) }