Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) #include <errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3) #include <inttypes.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4) #include <linux/err.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6) #include <linux/zalloc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7) #include <api/fs/fs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9) #include <byteswap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10) #include <unistd.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11) #include <sys/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12) #include <sys/mman.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13) #include <perf/cpumap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15) #include "map_symbol.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16) #include "branch.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) #include "debug.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18) #include "evlist.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) #include "evsel.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) #include "memswap.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) #include "map.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) #include "symbol.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) #include "session.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) #include "tool.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) #include "perf_regs.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) #include "asm/bug.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) #include "auxtrace.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) #include "thread.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) #include "thread-stack.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) #include "sample-raw.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31) #include "stat.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32) #include "ui/progress.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33) #include "../perf.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34) #include "arch/common.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35) #include <internal/lib.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37) #ifdef HAVE_ZSTD_SUPPORT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38) static int perf_session__process_compressed_event(struct perf_session *session,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39) 						  union perf_event *event, u64 file_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41) 	void *src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   42) 	size_t decomp_size, src_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   43) 	u64 decomp_last_rem = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   44) 	size_t mmap_len, decomp_len = session->header.env.comp_mmap_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   45) 	struct decomp *decomp, *decomp_last = session->decomp_last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   46) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   47) 	if (decomp_last) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   48) 		decomp_last_rem = decomp_last->size - decomp_last->head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   49) 		decomp_len += decomp_last_rem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   50) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   52) 	mmap_len = sizeof(struct decomp) + decomp_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   53) 	decomp = mmap(NULL, mmap_len, PROT_READ|PROT_WRITE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   54) 		      MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   55) 	if (decomp == MAP_FAILED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   56) 		pr_err("Couldn't allocate memory for decompression\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   57) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   59) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   60) 	decomp->file_pos = file_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61) 	decomp->mmap_len = mmap_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   62) 	decomp->head = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   63) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   64) 	if (decomp_last_rem) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   65) 		memcpy(decomp->data, &(decomp_last->data[decomp_last->head]), decomp_last_rem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   66) 		decomp->size = decomp_last_rem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   67) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69) 	src = (void *)event + sizeof(struct perf_record_compressed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   70) 	src_size = event->pack.header.size - sizeof(struct perf_record_compressed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   71) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   72) 	decomp_size = zstd_decompress_stream(&(session->zstd_data), src, src_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   73) 				&(decomp->data[decomp_last_rem]), decomp_len - decomp_last_rem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   74) 	if (!decomp_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   75) 		munmap(decomp, mmap_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   76) 		pr_err("Couldn't decompress data\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   77) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   78) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   79) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   80) 	decomp->size += decomp_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   81) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   82) 	if (session->decomp == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   83) 		session->decomp = decomp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   84) 		session->decomp_last = decomp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   85) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   86) 		session->decomp_last->next = decomp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   87) 		session->decomp_last = decomp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   88) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   89) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   90) 	pr_debug("decomp (B): %zd to %zd\n", src_size, decomp_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   91) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   92) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   93) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   94) #else /* !HAVE_ZSTD_SUPPORT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   95) #define perf_session__process_compressed_event perf_session__process_compressed_event_stub
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   96) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   97) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   98) static int perf_session__deliver_event(struct perf_session *session,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   99) 				       union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  100) 				       struct perf_tool *tool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  101) 				       u64 file_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  103) static int perf_session__open(struct perf_session *session)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  104) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  105) 	struct perf_data *data = session->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  107) 	if (perf_session__read_header(session) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  108) 		pr_err("incompatible file format (rerun with -v to learn more)\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  109) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  110) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  112) 	if (perf_data__is_pipe(data))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  113) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  115) 	if (perf_header__has_feat(&session->header, HEADER_STAT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  116) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  117) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  118) 	if (!evlist__valid_sample_type(session->evlist)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  119) 		pr_err("non matching sample_type\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  120) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  121) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  123) 	if (!evlist__valid_sample_id_all(session->evlist)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  124) 		pr_err("non matching sample_id_all\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  125) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  126) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  128) 	if (!perf_evlist__valid_read_format(session->evlist)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  129) 		pr_err("non matching read_format\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  130) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  131) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  133) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  136) void perf_session__set_id_hdr_size(struct perf_session *session)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  137) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  138) 	u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  140) 	machines__set_id_hdr_size(&session->machines, id_hdr_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  143) int perf_session__create_kernel_maps(struct perf_session *session)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  144) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  145) 	int ret = machine__create_kernel_maps(&session->machines.host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  147) 	if (ret >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  148) 		ret = machines__create_guest_kernel_maps(&session->machines);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  149) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  151) 
/* Drop the kernel maps of the host and all guest machines. */
static void perf_session__destroy_kernel_maps(struct perf_session *session)
{
	machines__destroy_kernel_maps(&session->machines);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  157) static bool perf_session__has_comm_exec(struct perf_session *session)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  158) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  159) 	struct evsel *evsel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  161) 	evlist__for_each_entry(session->evlist, evsel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  162) 		if (evsel->core.attr.comm_exec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  163) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  164) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  166) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  169) static void perf_session__set_comm_exec(struct perf_session *session)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  170) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  171) 	bool comm_exec = perf_session__has_comm_exec(session);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  173) 	machines__set_comm_exec(&session->machines, comm_exec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  176) static int ordered_events__deliver_event(struct ordered_events *oe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  177) 					 struct ordered_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  178) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  179) 	struct perf_session *session = container_of(oe, struct perf_session,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  180) 						    ordered_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  182) 	return perf_session__deliver_event(session, event->event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  183) 					   session->tool, event->file_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  185) 
/*
 * Allocate and initialize a perf session.
 *
 * @data:   backing perf.data file/pipe/directory, or NULL when there is
 *          no data file (e.g. a live session).
 * @repipe: stored on the session for later use by the read path.
 * @tool:   callbacks invoked when events are delivered.
 *
 * Returns the new session on success, or an ERR_PTR()-encoded negative
 * error code on failure (callers must check with IS_ERR()).
 */
struct perf_session *perf_session__new(struct perf_data *data,
				       bool repipe, struct perf_tool *tool)
{
	int ret = -ENOMEM;
	struct perf_session *session = zalloc(sizeof(*session));

	if (!session)
		goto out;

	session->repipe = repipe;
	session->tool   = tool;
	INIT_LIST_HEAD(&session->auxtrace_index);
	machines__init(&session->machines);
	ordered_events__init(&session->ordered_events,
			     ordered_events__deliver_event, NULL);

	perf_env__init(&session->header.env);
	if (data) {
		ret = perf_data__open(data);
		if (ret < 0)
			goto out_delete;

		session->data = data;

		if (perf_data__is_read(data)) {
			/* Validates the file header; fills session->evlist. */
			ret = perf_session__open(session);
			if (ret < 0)
				goto out_delete;

			/*
			 * set session attributes that are present in perf.data
			 * but not in pipe-mode.
			 */
			if (!data->is_pipe) {
				perf_session__set_id_hdr_size(session);
				perf_session__set_comm_exec(session);
			}

			perf_evlist__init_trace_event_sample_raw(session->evlist);

			/* Open the directory data. */
			if (data->is_dir) {
				ret = perf_data__open_dir(data);
				if (ret)
					goto out_delete;
			}

			/* Prefer the kallsyms copy shipped next to the data file. */
			if (!symbol_conf.kallsyms_name &&
			    !symbol_conf.vmlinux_name)
				symbol_conf.kallsyms_name = perf_data__kallsyms_name(data);
		}
	} else  {
		/* No data file: use the environment of the running system. */
		session->machines.host.env = &perf_env;
	}

	session->machines.host.single_address_space =
		perf_env__single_address_space(session->machines.host.env);

	if (!data || perf_data__is_write(data)) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in perf_event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(session) < 0)
			pr_warning("Cannot read kernel map\n");
	}

	/*
	 * In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is
	 * processed, so evlist__sample_id_all is not meaningful here.
	 */
	if ((!data || !data->is_pipe) && tool && tool->ordering_requires_timestamps &&
	    tool->ordered_events && !evlist__sample_id_all(session->evlist)) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		tool->ordered_events = false;
	}

	return session;

 out_delete:
	perf_session__delete(session);
 out:
	return ERR_PTR(ret);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  270) 
/* Delete all threads tracked on the session's host machine. */
static void perf_session__delete_threads(struct perf_session *session)
{
	machine__delete_threads(&session->machines.host);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  276) static void perf_session__release_decomp_events(struct perf_session *session)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  277) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  278) 	struct decomp *next, *decomp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  279) 	size_t mmap_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  280) 	next = session->decomp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  281) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  282) 		decomp = next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  283) 		if (decomp == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  284) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  285) 		next = decomp->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  286) 		mmap_len = decomp->mmap_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  287) 		munmap(decomp, mmap_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  288) 	} while (1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  290) 
/*
 * Tear down a session created by perf_session__new(): release auxtrace
 * state, kernel maps, threads and decompression buffers, exit the header
 * environment and machines, close the data file and free the session.
 * A NULL session is accepted and ignored.
 */
void perf_session__delete(struct perf_session *session)
{
	if (session == NULL)
		return;
	auxtrace__free(session);
	auxtrace_index__free(&session->auxtrace_index);
	perf_session__destroy_kernel_maps(session);
	perf_session__delete_threads(session);
	perf_session__release_decomp_events(session);
	perf_env__exit(&session->header.env);
	machines__exit(&session->machines);
	if (session->data)
		perf_data__close(session->data);
	free(session);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  306) 
/* Default handler for synthesized tracing-data events: log and ignore. */
static int process_event_synth_tracing_data_stub(struct perf_session *session
						 __maybe_unused,
						 union perf_event *event
						 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  315) 
/* Default handler for synthesized attr events: log and ignore. */
static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
					 union perf_event *event __maybe_unused,
					 struct evlist **pevlist
					 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  324) 
/*
 * Default handler for event-update events: dump the record when tracing
 * is enabled, then log it as unhandled.
 */
static int process_event_synth_event_update_stub(struct perf_tool *tool __maybe_unused,
						 union perf_event *event __maybe_unused,
						 struct evlist **pevlist
						 __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_event_update(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  336) 
/* Default sample handler: log and ignore. */
static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
				     union perf_event *event __maybe_unused,
				     struct perf_sample *sample __maybe_unused,
				     struct evsel *evsel __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  346) 
/* Generic default event handler: log and ignore. */
static int process_event_stub(struct perf_tool *tool __maybe_unused,
			      union perf_event *event __maybe_unused,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  355) 
/* Default handler for finished-round events: log and ignore. */
static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
				       union perf_event *event __maybe_unused,
				       struct ordered_events *oe __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  363) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  364) static int process_finished_round(struct perf_tool *tool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  365) 				  union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  366) 				  struct ordered_events *oe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  367) 
/*
 * Consume and discard the next @n bytes from @fd (used for streams that
 * cannot seek, such as pipes).
 *
 * Returns 0 when all @n bytes were skipped, -1 on read error or when EOF
 * is reached before @n bytes could be consumed.  The previous version
 * returned 0 (success) on premature EOF because read()'s 0 return was
 * passed straight through; it also relied on the kernel-only min() macro.
 */
static int skipn(int fd, off_t n)
{
	char buf[4096];
	ssize_t ret;

	while (n > 0) {
		size_t chunk = n < (off_t)sizeof(buf) ? (size_t)n : sizeof(buf);

		ret = read(fd, buf, chunk);
		if (ret < 0)
			return -1;
		if (ret == 0)	/* premature EOF: @n bytes were not available */
			return -1;
		n -= ret;
	}

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  382) 
/*
 * Default handler for PERF_RECORD_AUXTRACE: the payload is not parsed.
 * In pipe mode the payload bytes follow the record in the stream, so they
 * must still be consumed with skipn().  Returns the payload size.
 * NOTE(review): @session is tagged __maybe_unused but is in fact used.
 */
static s64 process_event_auxtrace_stub(struct perf_session *session __maybe_unused,
				       union perf_event *event)
{
	dump_printf(": unhandled!\n");
	if (perf_data__is_pipe(session->data))
		skipn(perf_data__fd(session->data), event->auxtrace.size);
	return event->auxtrace.size;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  391) 
/* Default handler for user/header (op2) events: log and ignore. */
static int process_event_op2_stub(struct perf_session *session __maybe_unused,
				  union perf_event *event __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  398) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  399) 
/* Default thread-map handler: dump when tracing, then log as unhandled. */
static
int process_event_thread_map_stub(struct perf_session *session __maybe_unused,
				  union perf_event *event __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_thread_map(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  410) 
/* Default CPU-map handler: dump when tracing, then log as unhandled. */
static
int process_event_cpu_map_stub(struct perf_session *session __maybe_unused,
			       union perf_event *event __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_cpu_map(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  421) 
/* Default stat-config handler: dump when tracing, then log as unhandled. */
static
int process_event_stat_config_stub(struct perf_session *session __maybe_unused,
				   union perf_event *event __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_stat_config(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) 
/* Default stat-event handler: dump when tracing, then log as unhandled. */
static int process_stat_stub(struct perf_session *perf_session __maybe_unused,
			     union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_stat(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 
/* Default stat-round handler: dump when tracing, then log as unhandled. */
static int process_stat_round_stub(struct perf_session *perf_session __maybe_unused,
				   union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_stat_round(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) static int perf_session__process_compressed_event_stub(struct perf_session *session __maybe_unused,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 						       union perf_event *event __maybe_unused,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 						       u64 file_offset __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457)        dump_printf(": unhandled!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458)        return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) void perf_tool__fill_defaults(struct perf_tool *tool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 	if (tool->sample == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 		tool->sample = process_event_sample_stub;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 	if (tool->mmap == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 		tool->mmap = process_event_stub;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 	if (tool->mmap2 == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 		tool->mmap2 = process_event_stub;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 	if (tool->comm == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 		tool->comm = process_event_stub;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 	if (tool->namespaces == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 		tool->namespaces = process_event_stub;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 	if (tool->cgroup == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 		tool->cgroup = process_event_stub;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 	if (tool->fork == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 		tool->fork = process_event_stub;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 	if (tool->exit == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 		tool->exit = process_event_stub;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 	if (tool->lost == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 		tool->lost = perf_event__process_lost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 	if (tool->lost_samples == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 		tool->lost_samples = perf_event__process_lost_samples;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 	if (tool->aux == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 		tool->aux = perf_event__process_aux;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 	if (tool->itrace_start == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 		tool->itrace_start = perf_event__process_itrace_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 	if (tool->context_switch == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 		tool->context_switch = perf_event__process_switch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 	if (tool->ksymbol == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 		tool->ksymbol = perf_event__process_ksymbol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 	if (tool->bpf == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 		tool->bpf = perf_event__process_bpf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 	if (tool->text_poke == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 		tool->text_poke = perf_event__process_text_poke;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 	if (tool->read == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 		tool->read = process_event_sample_stub;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 	if (tool->throttle == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 		tool->throttle = process_event_stub;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 	if (tool->unthrottle == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 		tool->unthrottle = process_event_stub;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 	if (tool->attr == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 		tool->attr = process_event_synth_attr_stub;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 	if (tool->event_update == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 		tool->event_update = process_event_synth_event_update_stub;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 	if (tool->tracing_data == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 		tool->tracing_data = process_event_synth_tracing_data_stub;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 	if (tool->build_id == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 		tool->build_id = process_event_op2_stub;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 	if (tool->finished_round == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 		if (tool->ordered_events)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 			tool->finished_round = process_finished_round;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 			tool->finished_round = process_finished_round_stub;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 	if (tool->id_index == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 		tool->id_index = process_event_op2_stub;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 	if (tool->auxtrace_info == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 		tool->auxtrace_info = process_event_op2_stub;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 	if (tool->auxtrace == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 		tool->auxtrace = process_event_auxtrace_stub;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 	if (tool->auxtrace_error == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 		tool->auxtrace_error = process_event_op2_stub;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 	if (tool->thread_map == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 		tool->thread_map = process_event_thread_map_stub;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 	if (tool->cpu_map == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 		tool->cpu_map = process_event_cpu_map_stub;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 	if (tool->stat_config == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 		tool->stat_config = process_event_stat_config_stub;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 	if (tool->stat == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 		tool->stat = process_stat_stub;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 	if (tool->stat_round == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 		tool->stat_round = process_stat_round_stub;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 	if (tool->time_conv == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 		tool->time_conv = process_event_op2_stub;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 	if (tool->feature == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 		tool->feature = process_event_op2_stub;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 	if (tool->compressed == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 		tool->compressed = perf_session__process_compressed_event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) static void swap_sample_id_all(union perf_event *event, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 	void *end = (void *) event + event->header.size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 	int size = end - data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 	BUG_ON(size % sizeof(u64));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 	mem_bswap_64(data, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) static void perf_event__all64_swap(union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 				   bool sample_id_all __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 	struct perf_event_header *hdr = &event->header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 	event->comm.pid = bswap_32(event->comm.pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 	event->comm.tid = bswap_32(event->comm.tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 	if (sample_id_all) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 		void *data = &event->comm.comm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 		swap_sample_id_all(event, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) static void perf_event__mmap_swap(union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 				  bool sample_id_all)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 	event->mmap.pid	  = bswap_32(event->mmap.pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 	event->mmap.tid	  = bswap_32(event->mmap.tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 	event->mmap.start = bswap_64(event->mmap.start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 	event->mmap.len	  = bswap_64(event->mmap.len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 	event->mmap.pgoff = bswap_64(event->mmap.pgoff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 	if (sample_id_all) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 		void *data = &event->mmap.filename;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 		swap_sample_id_all(event, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) static void perf_event__mmap2_swap(union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 				  bool sample_id_all)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 	event->mmap2.pid   = bswap_32(event->mmap2.pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 	event->mmap2.tid   = bswap_32(event->mmap2.tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 	event->mmap2.start = bswap_64(event->mmap2.start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 	event->mmap2.len   = bswap_64(event->mmap2.len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 	event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 	event->mmap2.maj   = bswap_32(event->mmap2.maj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 	event->mmap2.min   = bswap_32(event->mmap2.min);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 	event->mmap2.ino   = bswap_64(event->mmap2.ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 	event->mmap2.ino_generation = bswap_64(event->mmap2.ino_generation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 	if (sample_id_all) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 		void *data = &event->mmap2.filename;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 		swap_sample_id_all(event, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 	event->fork.pid	 = bswap_32(event->fork.pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 	event->fork.tid	 = bswap_32(event->fork.tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 	event->fork.ppid = bswap_32(event->fork.ppid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 	event->fork.ptid = bswap_32(event->fork.ptid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 	event->fork.time = bswap_64(event->fork.time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 	if (sample_id_all)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 		swap_sample_id_all(event, &event->fork + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 	event->read.pid		 = bswap_32(event->read.pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 	event->read.tid		 = bswap_32(event->read.tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 	event->read.value	 = bswap_64(event->read.value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 	event->read.time_enabled = bswap_64(event->read.time_enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 	event->read.time_running = bswap_64(event->read.time_running);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 	event->read.id		 = bswap_64(event->read.id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 	if (sample_id_all)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 		swap_sample_id_all(event, &event->read + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) static void perf_event__aux_swap(union perf_event *event, bool sample_id_all)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 	event->aux.aux_offset = bswap_64(event->aux.aux_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 	event->aux.aux_size   = bswap_64(event->aux.aux_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 	event->aux.flags      = bswap_64(event->aux.flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 	if (sample_id_all)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 		swap_sample_id_all(event, &event->aux + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) static void perf_event__itrace_start_swap(union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 					  bool sample_id_all)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 	event->itrace_start.pid	 = bswap_32(event->itrace_start.pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 	event->itrace_start.tid	 = bswap_32(event->itrace_start.tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 	if (sample_id_all)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 		swap_sample_id_all(event, &event->itrace_start + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) static void perf_event__switch_swap(union perf_event *event, bool sample_id_all)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 	if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 		event->context_switch.next_prev_pid =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 				bswap_32(event->context_switch.next_prev_pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 		event->context_switch.next_prev_tid =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 				bswap_32(event->context_switch.next_prev_tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 	if (sample_id_all)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 		swap_sample_id_all(event, &event->context_switch + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) static void perf_event__text_poke_swap(union perf_event *event, bool sample_id_all)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 	event->text_poke.addr    = bswap_64(event->text_poke.addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 	event->text_poke.old_len = bswap_16(event->text_poke.old_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 	event->text_poke.new_len = bswap_16(event->text_poke.new_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 	if (sample_id_all) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 		size_t len = sizeof(event->text_poke.old_len) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 			     sizeof(event->text_poke.new_len) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 			     event->text_poke.old_len +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 			     event->text_poke.new_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 		void *data = &event->text_poke.old_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 		data += PERF_ALIGN(len, sizeof(u64));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 		swap_sample_id_all(event, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) static void perf_event__throttle_swap(union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 				      bool sample_id_all)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 	event->throttle.time	  = bswap_64(event->throttle.time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 	event->throttle.id	  = bswap_64(event->throttle.id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 	event->throttle.stream_id = bswap_64(event->throttle.stream_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 	if (sample_id_all)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 		swap_sample_id_all(event, &event->throttle + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) static void perf_event__namespaces_swap(union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 					bool sample_id_all)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 	u64 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 	event->namespaces.pid		= bswap_32(event->namespaces.pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 	event->namespaces.tid		= bswap_32(event->namespaces.tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 	event->namespaces.nr_namespaces	= bswap_64(event->namespaces.nr_namespaces);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 	for (i = 0; i < event->namespaces.nr_namespaces; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 		struct perf_ns_link_info *ns = &event->namespaces.link_info[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 		ns->dev = bswap_64(ns->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 		ns->ino = bswap_64(ns->ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 	if (sample_id_all)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 		swap_sample_id_all(event, &event->namespaces.link_info[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) static void perf_event__cgroup_swap(union perf_event *event, bool sample_id_all)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 	event->cgroup.id = bswap_64(event->cgroup.id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 	if (sample_id_all) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 		void *data = &event->cgroup.path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 		swap_sample_id_all(event, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) static u8 revbyte(u8 b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 	int rev = (b >> 4) | ((b & 0xf) << 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 	rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 	rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 	return (u8) rev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735)  * XXX this is hack in attempt to carry flags bitfield
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736)  * through endian village. ABI says:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738)  * Bit-fields are allocated from right to left (least to most significant)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739)  * on little-endian implementations and from left to right (most to least
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740)  * significant) on big-endian implementations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742)  * The above seems to be byte specific, so we need to reverse each
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743)  * byte of the bitfield. 'Internet' also says this might be implementation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744)  * specific and we probably need proper fix and carry perf_event_attr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745)  * bitfield flags in separate data file FEAT_ section. Thought this seems
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746)  * to work for now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) static void swap_bitfield(u8 *p, unsigned len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 	unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 	for (i = 0; i < len; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 		*p = revbyte(*p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 		p++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) /* exported for swapping attributes in file header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) void perf_event__attr_swap(struct perf_event_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	attr->type		= bswap_32(attr->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	attr->size		= bswap_32(attr->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) #define bswap_safe(f, n) 					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	(attr->size > (offsetof(struct perf_event_attr, f) + 	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 		       sizeof(attr->f) * (n)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) #define bswap_field(f, sz) 			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) do { 						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 	if (bswap_safe(f, 0))			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 		attr->f = bswap_##sz(attr->f);	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) } while(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) #define bswap_field_16(f) bswap_field(f, 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) #define bswap_field_32(f) bswap_field(f, 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) #define bswap_field_64(f) bswap_field(f, 64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	bswap_field_64(config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	bswap_field_64(sample_period);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	bswap_field_64(sample_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 	bswap_field_64(read_format);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 	bswap_field_32(wakeup_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	bswap_field_32(bp_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 	bswap_field_64(bp_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	bswap_field_64(bp_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	bswap_field_64(branch_sample_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 	bswap_field_64(sample_regs_user);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	bswap_field_32(sample_stack_user);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 	bswap_field_32(aux_watermark);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	bswap_field_16(sample_max_stack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	bswap_field_32(aux_sample_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	 * After read_format are bitfields. Check read_format because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	 * we are unable to use offsetof on bitfield.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	if (bswap_safe(read_format, 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 		swap_bitfield((u8 *) (&attr->read_format + 1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 			      sizeof(u64));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) #undef bswap_field_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) #undef bswap_field_32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) #undef bswap_field
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) #undef bswap_safe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) static void perf_event__hdr_attr_swap(union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 				      bool sample_id_all __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 	size_t size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	perf_event__attr_swap(&event->attr.attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	size = event->header.size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	size -= (void *)&event->attr.id - (void *)event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	mem_bswap_64(event->attr.id, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) static void perf_event__event_update_swap(union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 					  bool sample_id_all __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	event->event_update.type = bswap_64(event->event_update.type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	event->event_update.id   = bswap_64(event->event_update.id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) static void perf_event__event_type_swap(union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 					bool sample_id_all __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	event->event_type.event_type.event_id =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 		bswap_64(event->event_type.event_type.event_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) static void perf_event__tracing_data_swap(union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 					  bool sample_id_all __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	event->tracing_data.size = bswap_32(event->tracing_data.size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) static void perf_event__auxtrace_info_swap(union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 					   bool sample_id_all __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	size_t size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	event->auxtrace_info.type = bswap_32(event->auxtrace_info.type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	size = event->header.size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	size -= (void *)&event->auxtrace_info.priv - (void *)event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	mem_bswap_64(event->auxtrace_info.priv, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) static void perf_event__auxtrace_swap(union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 				      bool sample_id_all __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	event->auxtrace.size      = bswap_64(event->auxtrace.size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	event->auxtrace.offset    = bswap_64(event->auxtrace.offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	event->auxtrace.reference = bswap_64(event->auxtrace.reference);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	event->auxtrace.idx       = bswap_32(event->auxtrace.idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	event->auxtrace.tid       = bswap_32(event->auxtrace.tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	event->auxtrace.cpu       = bswap_32(event->auxtrace.cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) static void perf_event__auxtrace_error_swap(union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 					    bool sample_id_all __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	event->auxtrace_error.type = bswap_32(event->auxtrace_error.type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	event->auxtrace_error.code = bswap_32(event->auxtrace_error.code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	event->auxtrace_error.cpu  = bswap_32(event->auxtrace_error.cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	event->auxtrace_error.pid  = bswap_32(event->auxtrace_error.pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	event->auxtrace_error.tid  = bswap_32(event->auxtrace_error.tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	event->auxtrace_error.fmt  = bswap_32(event->auxtrace_error.fmt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	event->auxtrace_error.ip   = bswap_64(event->auxtrace_error.ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	if (event->auxtrace_error.fmt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 		event->auxtrace_error.time = bswap_64(event->auxtrace_error.time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) static void perf_event__thread_map_swap(union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 					bool sample_id_all __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	event->thread_map.nr = bswap_64(event->thread_map.nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	for (i = 0; i < event->thread_map.nr; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 		event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) static void perf_event__cpu_map_swap(union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 				     bool sample_id_all __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	struct perf_record_cpu_map_data *data = &event->cpu_map.data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	struct cpu_map_entries *cpus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	struct perf_record_record_cpu_map *mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	data->type = bswap_64(data->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	switch (data->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	case PERF_CPU_MAP__CPUS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 		cpus = (struct cpu_map_entries *)data->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 		cpus->nr = bswap_16(cpus->nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 		for (i = 0; i < cpus->nr; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 			cpus->cpu[i] = bswap_16(cpus->cpu[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	case PERF_CPU_MAP__MASK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 		mask = (struct perf_record_record_cpu_map *)data->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 		mask->nr = bswap_16(mask->nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 		mask->long_size = bswap_16(mask->long_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 		switch (mask->long_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 		case 4: mem_bswap_32(&mask->mask, mask->nr); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 		case 8: mem_bswap_64(&mask->mask, mask->nr); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 			pr_err("cpu_map swap: unsupported long size\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) static void perf_event__stat_config_swap(union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 					 bool sample_id_all __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	u64 size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	size  = event->stat_config.nr * sizeof(event->stat_config.data[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	size += 1; /* nr item itself */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	mem_bswap_64(&event->stat_config.nr, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) static void perf_event__stat_swap(union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 				  bool sample_id_all __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	event->stat.id     = bswap_64(event->stat.id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	event->stat.thread = bswap_32(event->stat.thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	event->stat.cpu    = bswap_32(event->stat.cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	event->stat.val    = bswap_64(event->stat.val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	event->stat.ena    = bswap_64(event->stat.ena);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	event->stat.run    = bswap_64(event->stat.run);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) static void perf_event__stat_round_swap(union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 					bool sample_id_all __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	event->stat_round.type = bswap_64(event->stat_round.type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	event->stat_round.time = bswap_64(event->stat_round.time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) static void perf_event__time_conv_swap(union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 				       bool sample_id_all __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	event->time_conv.time_shift = bswap_64(event->time_conv.time_shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	event->time_conv.time_mult  = bswap_64(event->time_conv.time_mult);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	event->time_conv.time_zero  = bswap_64(event->time_conv.time_zero);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	if (event_contains(event->time_conv, time_cycles)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 		event->time_conv.time_cycles = bswap_64(event->time_conv.time_cycles);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 		event->time_conv.time_mask = bswap_64(event->time_conv.time_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) typedef void (*perf_event__swap_op)(union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 				    bool sample_id_all);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) static perf_event__swap_op perf_event__swap_ops[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	[PERF_RECORD_MMAP]		  = perf_event__mmap_swap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	[PERF_RECORD_MMAP2]		  = perf_event__mmap2_swap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	[PERF_RECORD_COMM]		  = perf_event__comm_swap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	[PERF_RECORD_FORK]		  = perf_event__task_swap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	[PERF_RECORD_EXIT]		  = perf_event__task_swap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	[PERF_RECORD_LOST]		  = perf_event__all64_swap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	[PERF_RECORD_READ]		  = perf_event__read_swap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	[PERF_RECORD_THROTTLE]		  = perf_event__throttle_swap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	[PERF_RECORD_UNTHROTTLE]	  = perf_event__throttle_swap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	[PERF_RECORD_SAMPLE]		  = perf_event__all64_swap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	[PERF_RECORD_AUX]		  = perf_event__aux_swap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	[PERF_RECORD_ITRACE_START]	  = perf_event__itrace_start_swap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	[PERF_RECORD_LOST_SAMPLES]	  = perf_event__all64_swap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	[PERF_RECORD_SWITCH]		  = perf_event__switch_swap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	[PERF_RECORD_SWITCH_CPU_WIDE]	  = perf_event__switch_swap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	[PERF_RECORD_NAMESPACES]	  = perf_event__namespaces_swap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	[PERF_RECORD_CGROUP]		  = perf_event__cgroup_swap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	[PERF_RECORD_TEXT_POKE]		  = perf_event__text_poke_swap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	[PERF_RECORD_HEADER_ATTR]	  = perf_event__hdr_attr_swap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	[PERF_RECORD_HEADER_EVENT_TYPE]	  = perf_event__event_type_swap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	[PERF_RECORD_HEADER_BUILD_ID]	  = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	[PERF_RECORD_ID_INDEX]		  = perf_event__all64_swap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	[PERF_RECORD_AUXTRACE_INFO]	  = perf_event__auxtrace_info_swap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	[PERF_RECORD_AUXTRACE]		  = perf_event__auxtrace_swap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	[PERF_RECORD_AUXTRACE_ERROR]	  = perf_event__auxtrace_error_swap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	[PERF_RECORD_THREAD_MAP]	  = perf_event__thread_map_swap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	[PERF_RECORD_CPU_MAP]		  = perf_event__cpu_map_swap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	[PERF_RECORD_STAT_CONFIG]	  = perf_event__stat_config_swap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	[PERF_RECORD_STAT]		  = perf_event__stat_swap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	[PERF_RECORD_STAT_ROUND]	  = perf_event__stat_round_swap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	[PERF_RECORD_EVENT_UPDATE]	  = perf_event__event_update_swap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	[PERF_RECORD_TIME_CONV]		  = perf_event__time_conv_swap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	[PERF_RECORD_HEADER_MAX]	  = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)  * When perf record finishes a pass on every buffers, it records this pseudo
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)  * event.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)  * We record the max timestamp t found in the pass n.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)  * Assuming these timestamps are monotonic across cpus, we know that if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)  * a buffer still has events with timestamps below t, they will be all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)  * available and then read in the pass n + 1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)  * Hence when we start to read the pass n + 2, we can safely flush every
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)  * events with timestamps below t.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)  *    ============ PASS n =================
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)  *       CPU 0         |   CPU 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)  *                     |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)  *    cnt1 timestamps  |   cnt2 timestamps
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)  *          1          |         2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)  *          2          |         3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)  *          -          |         4  <--- max recorded
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)  *    ============ PASS n + 1 ==============
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)  *       CPU 0         |   CPU 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)  *                     |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)  *    cnt1 timestamps  |   cnt2 timestamps
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)  *          3          |         5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)  *          4          |         6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)  *          5          |         7 <---- max recorded
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)  *      Flush every events below timestamp 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)  *    ============ PASS n + 2 ==============
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)  *       CPU 0         |   CPU 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)  *                     |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)  *    cnt1 timestamps  |   cnt2 timestamps
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)  *          6          |         8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)  *          7          |         9
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)  *          -          |         10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)  *      Flush every events below timestamp 7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)  *      etc...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) static int process_finished_round(struct perf_tool *tool __maybe_unused,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 				  union perf_event *event __maybe_unused,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 				  struct ordered_events *oe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	if (dump_trace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 		fprintf(stdout, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	return ordered_events__flush(oe, OE_FLUSH__ROUND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) int perf_session__queue_event(struct perf_session *s, union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 			      u64 timestamp, u64 file_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	return ordered_events__queue(&s->ordered_events, event, timestamp, file_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) static void callchain__lbr_callstack_printf(struct perf_sample *sample)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	struct ip_callchain *callchain = sample->callchain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	struct branch_stack *lbr_stack = sample->branch_stack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	struct branch_entry *entries = perf_sample__branch_entries(sample);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	u64 kernel_callchain_nr = callchain->nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	for (i = 0; i < kernel_callchain_nr; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 		if (callchain->ips[i] == PERF_CONTEXT_USER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	if ((i != kernel_callchain_nr) && lbr_stack->nr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 		u64 total_nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 		 * LBR callstack can only get user call chain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 		 * i is kernel call chain number,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 		 * 1 is PERF_CONTEXT_USER.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 		 * The user call chain is stored in LBR registers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 		 * LBR are pair registers. The caller is stored
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 		 * in "from" register, while the callee is stored
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 		 * in "to" register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 		 * For example, there is a call stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 		 * "A"->"B"->"C"->"D".
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 		 * The LBR registers will recorde like
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 		 * "C"->"D", "B"->"C", "A"->"B".
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 		 * So only the first "to" register and all "from"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 		 * registers are needed to construct the whole stack.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 		total_nr = i + 1 + lbr_stack->nr + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 		kernel_callchain_nr = i + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 		printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 		for (i = 0; i < kernel_callchain_nr; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 			printf("..... %2d: %016" PRIx64 "\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 			       i, callchain->ips[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 		printf("..... %2d: %016" PRIx64 "\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 		       (int)(kernel_callchain_nr), entries[0].to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 		for (i = 0; i < lbr_stack->nr; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 			printf("..... %2d: %016" PRIx64 "\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 			       (int)(i + kernel_callchain_nr + 1), entries[i].from);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) static void callchain__printf(struct evsel *evsel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 			      struct perf_sample *sample)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	struct ip_callchain *callchain = sample->callchain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	if (evsel__has_branch_callstack(evsel))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 		callchain__lbr_callstack_printf(sample);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	for (i = 0; i < callchain->nr; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 		printf("..... %2d: %016" PRIx64 "\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 		       i, callchain->ips[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) static void branch_stack__printf(struct perf_sample *sample, bool callstack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	struct branch_entry *entries = perf_sample__branch_entries(sample);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	uint64_t i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	printf("%s: nr:%" PRIu64 "\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 		!callstack ? "... branch stack" : "... branch callstack",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 		sample->branch_stack->nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	for (i = 0; i < sample->branch_stack->nr; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 		struct branch_entry *e = &entries[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 		if (!callstack) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 			printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 				i, e->from, e->to,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 				(unsigned short)e->flags.cycles,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 				e->flags.mispred ? "M" : " ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 				e->flags.predicted ? "P" : " ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 				e->flags.abort ? "A" : " ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 				e->flags.in_tx ? "T" : " ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 				(unsigned)e->flags.reserved);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 			printf("..... %2"PRIu64": %016" PRIx64 "\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 				i, i > 0 ? e->from : e->to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) static void regs_dump__printf(u64 mask, u64 *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	unsigned rid, i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 	for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 		u64 val = regs[i++];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 		printf(".... %-5s 0x%016" PRIx64 "\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 		       perf_reg_name(rid), val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) static const char *regs_abi[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	[PERF_SAMPLE_REGS_ABI_NONE] = "none",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	[PERF_SAMPLE_REGS_ABI_32] = "32-bit",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	[PERF_SAMPLE_REGS_ABI_64] = "64-bit",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) static inline const char *regs_dump_abi(struct regs_dump *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	if (d->abi > PERF_SAMPLE_REGS_ABI_64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 		return "unknown";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	return regs_abi[d->abi];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) static void regs__printf(const char *type, struct regs_dump *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	u64 mask = regs->mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	       type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	       mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	       regs_dump_abi(regs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	regs_dump__printf(mask, regs->regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) static void regs_user__printf(struct perf_sample *sample)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	struct regs_dump *user_regs = &sample->user_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	if (user_regs->regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 		regs__printf("user", user_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) static void regs_intr__printf(struct perf_sample *sample)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	struct regs_dump *intr_regs = &sample->intr_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 	if (intr_regs->regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 		regs__printf("intr", intr_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) static void stack_user__printf(struct stack_dump *dump)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 	       dump->size, dump->offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) static void perf_evlist__print_tstamp(struct evlist *evlist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 				       union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 				       struct perf_sample *sample)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	u64 sample_type = __evlist__combined_sample_type(evlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	if (event->header.type != PERF_RECORD_SAMPLE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	    !evlist__sample_id_all(evlist)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 		fputs("-1 -1 ", stdout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	if ((sample_type & PERF_SAMPLE_CPU))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 		printf("%u ", sample->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	if (sample_type & PERF_SAMPLE_TIME)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 		printf("%" PRIu64 " ", sample->time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) static void sample_read__printf(struct perf_sample *sample, u64 read_format)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	printf("... sample_read:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 		printf("...... time enabled %016" PRIx64 "\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 		       sample->read.time_enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 		printf("...... time running %016" PRIx64 "\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 		       sample->read.time_running);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	if (read_format & PERF_FORMAT_GROUP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 		u64 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 		printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 		for (i = 0; i < sample->read.group.nr; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 			struct sample_read_value *value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 			value = &sample->read.group.values[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 			printf("..... id %016" PRIx64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 			       ", value %016" PRIx64 "\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 			       value->id, value->value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 		printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 			sample->read.one.id, sample->read.one.value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) static void dump_event(struct evlist *evlist, union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 		       u64 file_offset, struct perf_sample *sample)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	if (!dump_trace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	printf("\n%#" PRIx64 " [%#x]: event: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	       file_offset, event->header.size, event->header.type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	trace_event(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	if (event->header.type == PERF_RECORD_SAMPLE && evlist->trace_event_sample_raw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 		evlist->trace_event_sample_raw(evlist, event, sample);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	if (sample)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 		perf_evlist__print_tstamp(evlist, event, sample);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 	       event->header.size, perf_event__name(event->header.type));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) static void dump_sample(struct evsel *evsel, union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 			struct perf_sample *sample)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	u64 sample_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	if (!dump_trace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	       event->header.misc, sample->pid, sample->tid, sample->ip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 	       sample->period, sample->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	sample_type = evsel->core.attr.sample_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	if (evsel__has_callchain(evsel))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 		callchain__printf(evsel, sample);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 	if (evsel__has_br_stack(evsel))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 		branch_stack__printf(sample, evsel__has_branch_callstack(evsel));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	if (sample_type & PERF_SAMPLE_REGS_USER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 		regs_user__printf(sample);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	if (sample_type & PERF_SAMPLE_REGS_INTR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 		regs_intr__printf(sample);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	if (sample_type & PERF_SAMPLE_STACK_USER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 		stack_user__printf(&sample->user_stack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	if (sample_type & PERF_SAMPLE_WEIGHT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 		printf("... weight: %" PRIu64 "\n", sample->weight);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 	if (sample_type & PERF_SAMPLE_DATA_SRC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 		printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 	if (sample_type & PERF_SAMPLE_PHYS_ADDR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 		printf(" .. phys_addr: 0x%"PRIx64"\n", sample->phys_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 	if (sample_type & PERF_SAMPLE_TRANSACTION)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 		printf("... transaction: %" PRIx64 "\n", sample->transaction);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 	if (sample_type & PERF_SAMPLE_READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 		sample_read__printf(sample, evsel->core.attr.read_format);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) static void dump_read(struct evsel *evsel, union perf_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 	struct perf_record_read *read_event = &event->read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 	u64 read_format;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 	if (!dump_trace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 	printf(": %d %d %s %" PRI_lu64 "\n", event->read.pid, event->read.tid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 	       evsel__name(evsel), event->read.value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 	if (!evsel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 	read_format = evsel->core.attr.read_format;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 		printf("... time enabled : %" PRI_lu64 "\n", read_event->time_enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 		printf("... time running : %" PRI_lu64 "\n", read_event->time_running);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 	if (read_format & PERF_FORMAT_ID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 		printf("... id           : %" PRI_lu64 "\n", read_event->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) static struct machine *machines__find_for_cpumode(struct machines *machines,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 					       union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 					       struct perf_sample *sample)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	struct machine *machine;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	if (perf_guest &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	    ((sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	     (sample->cpumode == PERF_RECORD_MISC_GUEST_USER))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 		u32 pid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 		if (event->header.type == PERF_RECORD_MMAP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 		    || event->header.type == PERF_RECORD_MMAP2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 			pid = event->mmap.pid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 			pid = sample->pid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 		machine = machines__find(machines, pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 		if (!machine)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 			machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 		return machine;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 	return &machines->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) static int deliver_sample_value(struct evlist *evlist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 				struct perf_tool *tool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 				union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 				struct perf_sample *sample,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 				struct sample_read_value *v,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 				struct machine *machine)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 	struct perf_sample_id *sid = perf_evlist__id2sid(evlist, v->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 	struct evsel *evsel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 	if (sid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 		sample->id     = v->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 		sample->period = v->value - sid->period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 		sid->period    = v->value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	if (!sid || sid->evsel == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 		++evlist->stats.nr_unknown_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 	 * There's no reason to deliver sample
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	 * for zero period, bail out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 	if (!sample->period)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 	evsel = container_of(sid->evsel, struct evsel, core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 	return tool->sample(tool, event, sample, evsel, machine);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) static int deliver_sample_group(struct evlist *evlist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 				struct perf_tool *tool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 				union  perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 				struct perf_sample *sample,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 				struct machine *machine)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 	int ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 	u64 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 	for (i = 0; i < sample->read.group.nr; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 		ret = deliver_sample_value(evlist, tool, event, sample,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 					   &sample->read.group.values[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 					   machine);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426)  perf_evlist__deliver_sample(struct evlist *evlist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 			     struct perf_tool *tool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 			     union  perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 			     struct perf_sample *sample,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 			     struct evsel *evsel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 			     struct machine *machine)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 	/* We know evsel != NULL. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 	u64 sample_type = evsel->core.attr.sample_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 	u64 read_format = evsel->core.attr.read_format;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 	/* Standard sample delivery. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 	if (!(sample_type & PERF_SAMPLE_READ))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 		return tool->sample(tool, event, sample, evsel, machine);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 	/* For PERF_SAMPLE_READ we have either single or group mode. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 	if (read_format & PERF_FORMAT_GROUP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 		return deliver_sample_group(evlist, tool, event, sample,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 					    machine);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 		return deliver_sample_value(evlist, tool, event, sample,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 					    &sample->read.one, machine);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) static int machines__deliver_event(struct machines *machines,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 				   struct evlist *evlist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 				   union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 				   struct perf_sample *sample,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 				   struct perf_tool *tool, u64 file_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 	struct evsel *evsel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	struct machine *machine;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 	dump_event(evlist, event, file_offset, sample);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 	evsel = perf_evlist__id2evsel(evlist, sample->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 	machine = machines__find_for_cpumode(machines, event, sample);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 	switch (event->header.type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 	case PERF_RECORD_SAMPLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 		if (evsel == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 			++evlist->stats.nr_unknown_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 		dump_sample(evsel, event, sample);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 		if (machine == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 			++evlist->stats.nr_unprocessable_samples;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 		return perf_evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 	case PERF_RECORD_MMAP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 		return tool->mmap(tool, event, sample, machine);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 	case PERF_RECORD_MMAP2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 		if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 			++evlist->stats.nr_proc_map_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 		return tool->mmap2(tool, event, sample, machine);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 	case PERF_RECORD_COMM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 		return tool->comm(tool, event, sample, machine);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 	case PERF_RECORD_NAMESPACES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 		return tool->namespaces(tool, event, sample, machine);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 	case PERF_RECORD_CGROUP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 		return tool->cgroup(tool, event, sample, machine);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 	case PERF_RECORD_FORK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 		return tool->fork(tool, event, sample, machine);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 	case PERF_RECORD_EXIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 		return tool->exit(tool, event, sample, machine);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 	case PERF_RECORD_LOST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 		if (tool->lost == perf_event__process_lost)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 			evlist->stats.total_lost += event->lost.lost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 		return tool->lost(tool, event, sample, machine);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 	case PERF_RECORD_LOST_SAMPLES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 		if (tool->lost_samples == perf_event__process_lost_samples)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 			evlist->stats.total_lost_samples += event->lost_samples.lost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 		return tool->lost_samples(tool, event, sample, machine);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 	case PERF_RECORD_READ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 		dump_read(evsel, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 		return tool->read(tool, event, sample, evsel, machine);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 	case PERF_RECORD_THROTTLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 		return tool->throttle(tool, event, sample, machine);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 	case PERF_RECORD_UNTHROTTLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 		return tool->unthrottle(tool, event, sample, machine);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 	case PERF_RECORD_AUX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 		if (tool->aux == perf_event__process_aux) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 			if (event->aux.flags & PERF_AUX_FLAG_TRUNCATED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 				evlist->stats.total_aux_lost += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 			if (event->aux.flags & PERF_AUX_FLAG_PARTIAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 				evlist->stats.total_aux_partial += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 		return tool->aux(tool, event, sample, machine);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 	case PERF_RECORD_ITRACE_START:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 		return tool->itrace_start(tool, event, sample, machine);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 	case PERF_RECORD_SWITCH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 	case PERF_RECORD_SWITCH_CPU_WIDE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 		return tool->context_switch(tool, event, sample, machine);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 	case PERF_RECORD_KSYMBOL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 		return tool->ksymbol(tool, event, sample, machine);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 	case PERF_RECORD_BPF_EVENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 		return tool->bpf(tool, event, sample, machine);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 	case PERF_RECORD_TEXT_POKE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 		return tool->text_poke(tool, event, sample, machine);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 		++evlist->stats.nr_unknown_events;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) static int perf_session__deliver_event(struct perf_session *session,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 				       union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 				       struct perf_tool *tool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 				       u64 file_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 	struct perf_sample sample;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 	ret = perf_evlist__parse_sample(session->evlist, event, &sample);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 		pr_err("Can't parse sample, err = %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 	ret = auxtrace__process_event(session, event, &sample, tool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 	if (ret > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 	ret = machines__deliver_event(&session->machines, session->evlist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 				      event, &sample, tool, file_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 	if (dump_trace && sample.aux_sample.size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 		auxtrace__dump_auxtrace_sample(session, &sample);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) static s64 perf_session__process_user_event(struct perf_session *session,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 					    union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 					    u64 file_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 	struct ordered_events *oe = &session->ordered_events;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 	struct perf_tool *tool = session->tool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 	struct perf_sample sample = { .time = 0, };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 	int fd = perf_data__fd(session->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 	if (event->header.type != PERF_RECORD_COMPRESSED ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 	    tool->compressed == perf_session__process_compressed_event_stub)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 		dump_event(session->evlist, event, file_offset, &sample);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 	/* These events are processed right away */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 	switch (event->header.type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 	case PERF_RECORD_HEADER_ATTR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 		err = tool->attr(tool, event, &session->evlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 		if (err == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 			perf_session__set_id_hdr_size(session);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 			perf_session__set_comm_exec(session);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 	case PERF_RECORD_EVENT_UPDATE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 		return tool->event_update(tool, event, &session->evlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 	case PERF_RECORD_HEADER_EVENT_TYPE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 		 * Depreceated, but we need to handle it for sake
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 		 * of old data files create in pipe mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 	case PERF_RECORD_HEADER_TRACING_DATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 		 * Setup for reading amidst mmap, but only when we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 		 * are in 'file' mode. The 'pipe' fd is in proper
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 		 * place already.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 		if (!perf_data__is_pipe(session->data))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 			lseek(fd, file_offset, SEEK_SET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 		return tool->tracing_data(session, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 	case PERF_RECORD_HEADER_BUILD_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 		return tool->build_id(session, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 	case PERF_RECORD_FINISHED_ROUND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 		return tool->finished_round(tool, event, oe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 	case PERF_RECORD_ID_INDEX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 		return tool->id_index(session, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 	case PERF_RECORD_AUXTRACE_INFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 		return tool->auxtrace_info(session, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 	case PERF_RECORD_AUXTRACE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 		/* setup for reading amidst mmap */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 		lseek(fd, file_offset + event->header.size, SEEK_SET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 		return tool->auxtrace(session, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 	case PERF_RECORD_AUXTRACE_ERROR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 		perf_session__auxtrace_error_inc(session, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 		return tool->auxtrace_error(session, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 	case PERF_RECORD_THREAD_MAP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 		return tool->thread_map(session, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 	case PERF_RECORD_CPU_MAP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 		return tool->cpu_map(session, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 	case PERF_RECORD_STAT_CONFIG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 		return tool->stat_config(session, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 	case PERF_RECORD_STAT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 		return tool->stat(session, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 	case PERF_RECORD_STAT_ROUND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 		return tool->stat_round(session, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 	case PERF_RECORD_TIME_CONV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 		session->time_conv = event->time_conv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 		return tool->time_conv(session, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 	case PERF_RECORD_HEADER_FEATURE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 		return tool->feature(session, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 	case PERF_RECORD_COMPRESSED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 		err = tool->compressed(session, event, file_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 			dump_event(session->evlist, event, file_offset, &sample);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) int perf_session__deliver_synth_event(struct perf_session *session,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 				      union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 				      struct perf_sample *sample)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 	struct evlist *evlist = session->evlist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 	struct perf_tool *tool = session->tool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 	events_stats__inc(&evlist->stats, event->header.type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 		return perf_session__process_user_event(session, event, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 	return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) static void event_swap(union perf_event *event, bool sample_id_all)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 	perf_event__swap_op swap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 	swap = perf_event__swap_ops[event->header.type];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 	if (swap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 		swap(event, sample_id_all);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) int perf_session__peek_event(struct perf_session *session, off_t file_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 			     void *buf, size_t buf_sz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 			     union perf_event **event_ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 			     struct perf_sample *sample)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 	union perf_event *event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 	size_t hdr_sz, rest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 	int fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 	if (session->one_mmap && !session->header.needs_swap) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 		event = file_offset - session->one_mmap_offset +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 			session->one_mmap_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 		goto out_parse_sample;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 	if (perf_data__is_pipe(session->data))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 	fd = perf_data__fd(session->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 	hdr_sz = sizeof(struct perf_event_header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 	if (buf_sz < hdr_sz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 	if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 	    readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 	event = (union perf_event *)buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 	if (session->header.needs_swap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 		perf_event_header__bswap(&event->header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 	if (event->header.size < hdr_sz || event->header.size > buf_sz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 	buf += hdr_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 	rest = event->header.size - hdr_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 	if (readn(fd, buf, rest) != (ssize_t)rest)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 	if (session->header.needs_swap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 		event_swap(event, evlist__sample_id_all(session->evlist));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) out_parse_sample:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 	if (sample && event->header.type < PERF_RECORD_USER_TYPE_START &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 	    perf_evlist__parse_sample(session->evlist, event, sample))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 	*event_ptr = event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) int perf_session__peek_events(struct perf_session *session, u64 offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 			      u64 size, peek_events_cb_t cb, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 	u64 max_offset = offset + size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 	char buf[PERF_SAMPLE_MAX_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 	union perf_event *event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 		err = perf_session__peek_event(session, offset, buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 					       PERF_SAMPLE_MAX_SIZE, &event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 					       NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 		err = cb(session, event, offset, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 		offset += event->header.size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 		if (event->header.type == PERF_RECORD_AUXTRACE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 			offset += event->auxtrace.size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 	} while (offset < max_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) static s64 perf_session__process_event(struct perf_session *session,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 				       union perf_event *event, u64 file_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 	struct evlist *evlist = session->evlist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 	struct perf_tool *tool = session->tool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 	if (session->header.needs_swap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 		event_swap(event, evlist__sample_id_all(evlist));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 	if (event->header.type >= PERF_RECORD_HEADER_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 	events_stats__inc(&evlist->stats, event->header.type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 		return perf_session__process_user_event(session, event, file_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 	if (tool->ordered_events) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 		u64 timestamp = -1ULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 		ret = perf_evlist__parse_sample_timestamp(evlist, event, &timestamp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 		if (ret && ret != -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 		ret = perf_session__queue_event(session, event, timestamp, file_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 		if (ret != -ETIME)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 	return perf_session__deliver_event(session, event, tool, file_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) void perf_event_header__bswap(struct perf_event_header *hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 	hdr->type = bswap_32(hdr->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 	hdr->misc = bswap_16(hdr->misc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 	hdr->size = bswap_16(hdr->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 	return machine__findnew_thread(&session->machines.host, -1, pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796)  * Threads are identified by pid and tid, and the idle task has pid == tid == 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797)  * So here a single thread is created for that, but actually there is a separate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798)  * idle task per cpu, so there should be one 'struct thread' per cpu, but there
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799)  * is only 1. That causes problems for some tools, requiring workarounds. For
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800)  * example get_idle_thread() in builtin-sched.c, or thread_stack__per_cpu().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) int perf_session__register_idle_thread(struct perf_session *session)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 	struct thread *thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 	thread = machine__findnew_thread(&session->machines.host, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 	if (thread == NULL || thread__set_comm(thread, "swapper", 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 		pr_err("problem inserting idle task.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 		err = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 	if (thread == NULL || thread__set_namespaces(thread, 0, NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 		pr_err("problem inserting idle task.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 		err = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 	/* machine__findnew_thread() got the thread, so put it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 	thread__put(thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) perf_session__warn_order(const struct perf_session *session)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 	const struct ordered_events *oe = &session->ordered_events;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 	struct evsel *evsel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 	bool should_warn = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 	evlist__for_each_entry(session->evlist, evsel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 		if (evsel->core.attr.write_backward)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 			should_warn = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 	if (!should_warn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 	if (oe->nr_unordered_events != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 		ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) static void perf_session__warn_about_errors(const struct perf_session *session)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 	const struct events_stats *stats = &session->evlist->stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 	if (session->tool->lost == perf_event__process_lost &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 	    stats->nr_events[PERF_RECORD_LOST] != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 		ui__warning("Processed %d events and lost %d chunks!\n\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 			    "Check IO/CPU overload!\n\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 			    stats->nr_events[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 			    stats->nr_events[PERF_RECORD_LOST]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 	if (session->tool->lost_samples == perf_event__process_lost_samples) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 		double drop_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 		drop_rate = (double)stats->total_lost_samples /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 			    (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 		if (drop_rate > 0.05) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 			ui__warning("Processed %" PRIu64 " samples and lost %3.2f%%!\n\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 				    stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 				    drop_rate * 100.0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 	if (session->tool->aux == perf_event__process_aux &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 	    stats->total_aux_lost != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 		ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 			    stats->total_aux_lost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 			    stats->nr_events[PERF_RECORD_AUX]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 	if (session->tool->aux == perf_event__process_aux &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 	    stats->total_aux_partial != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 		bool vmm_exclusive = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 		(void)sysfs__read_bool("module/kvm_intel/parameters/vmm_exclusive",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 		                       &vmm_exclusive);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 		ui__warning("AUX data had gaps in it %" PRIu64 " times out of %u!\n\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 		            "Are you running a KVM guest in the background?%s\n\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 			    stats->total_aux_partial,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 			    stats->nr_events[PERF_RECORD_AUX],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 			    vmm_exclusive ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 			    "\nReloading kvm_intel module with vmm_exclusive=0\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 			    "will reduce the gaps to only guest's timeslices." :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 			    "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 	if (stats->nr_unknown_events != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 		ui__warning("Found %u unknown events!\n\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 			    "Is this an older tool processing a perf.data "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 			    "file generated by a more recent tool?\n\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 			    "If that is not the case, consider "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 			    "reporting to linux-kernel@vger.kernel.org.\n\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 			    stats->nr_unknown_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 	if (stats->nr_unknown_id != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 		ui__warning("%u samples with id not present in the header\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 			    stats->nr_unknown_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 	if (stats->nr_invalid_chains != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 		ui__warning("Found invalid callchains!\n\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 			    "%u out of %u events were discarded for this reason.\n\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 			    "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 			    stats->nr_invalid_chains,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 			    stats->nr_events[PERF_RECORD_SAMPLE]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 	if (stats->nr_unprocessable_samples != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 		ui__warning("%u unprocessable samples recorded.\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 			    "Do you have a KVM guest running and not using 'perf kvm'?\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 			    stats->nr_unprocessable_samples);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 	perf_session__warn_order(session);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 	events_stats__auxtrace_error_warn(stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 	if (stats->nr_proc_map_timeout != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 		ui__warning("%d map information files for pre-existing threads were\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 			    "not processed, if there are samples for addresses they\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 			    "will not be resolved, you may find out which are these\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 			    "threads by running with -v and redirecting the output\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 			    "to a file.\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 			    "The time limit to process proc map is too short?\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 			    "Increase it by --proc-map-timeout\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 			    stats->nr_proc_map_timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) static int perf_session__flush_thread_stack(struct thread *thread,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 					    void *p __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 	return thread_stack__flush(thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) static int perf_session__flush_thread_stacks(struct perf_session *session)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 	return machines__for_each_thread(&session->machines,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 					 perf_session__flush_thread_stack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 					 NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) volatile int session_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) static int __perf_session__process_decomp_events(struct perf_session *session);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) static int __perf_session__process_pipe_events(struct perf_session *session)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 	struct ordered_events *oe = &session->ordered_events;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 	struct perf_tool *tool = session->tool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 	int fd = perf_data__fd(session->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 	union perf_event *event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 	uint32_t size, cur_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 	void *buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 	s64 skip = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 	u64 head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 	ssize_t err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 	void *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 	perf_tool__fill_defaults(tool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 	head = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 	cur_size = sizeof(union perf_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 	buf = malloc(cur_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 	if (!buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 		return -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 	ordered_events__set_copy_on_queue(oe, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) more:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 	event = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 	err = readn(fd, event, sizeof(struct perf_event_header));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 	if (err <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 		if (err == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 			goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 		pr_err("failed to read event header\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 		goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 	if (session->header.needs_swap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 		perf_event_header__bswap(&event->header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 	size = event->header.size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 	if (size < sizeof(struct perf_event_header)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 		pr_err("bad event header size\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 		goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 	if (size > cur_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 		void *new = realloc(buf, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 		if (!new) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 			pr_err("failed to allocate memory to read event\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 			goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 		buf = new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 		cur_size = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 		event = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 	p = event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 	p += sizeof(struct perf_event_header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 	if (size - sizeof(struct perf_event_header)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 		err = readn(fd, p, size - sizeof(struct perf_event_header));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 		if (err <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 			if (err == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 				pr_err("unexpected end of event stream\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 				goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 			pr_err("failed to read event data\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 			goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 	if ((skip = perf_session__process_event(session, event, head)) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 		       head, event->header.size, event->header.type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 		err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 		goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 	head += size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 	if (skip > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 		head += skip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 	err = __perf_session__process_decomp_events(session);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 		goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 	if (!session_done())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 		goto more;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 	/* do the final flush for ordered samples */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 	err = ordered_events__flush(oe, OE_FLUSH__FINAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 		goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 	err = auxtrace__flush_events(session, tool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 		goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 	err = perf_session__flush_thread_stacks(session);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) out_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 	free(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 	if (!tool->no_warn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 		perf_session__warn_about_errors(session);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 	ordered_events__free(&session->ordered_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 	auxtrace__free_events(session);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) static union perf_event *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) prefetch_event(char *buf, u64 head, size_t mmap_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 	       bool needs_swap, union perf_event *error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 	union perf_event *event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 	 * Ensure we have enough space remaining to read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 	 * the size of the event in the headers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 	if (head + sizeof(event->header) > mmap_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 	event = (union perf_event *)(buf + head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 	if (needs_swap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 		perf_event_header__bswap(&event->header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 	if (head + event->header.size <= mmap_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 		return event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 	/* We're not fetching the event so swap back again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 	if (needs_swap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 		perf_event_header__bswap(&event->header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 	pr_debug("%s: head=%#" PRIx64 " event->header_size=%#x, mmap_size=%#zx:"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 		 " fuzzed or compressed perf.data?\n",__func__, head, event->header.size, mmap_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) static union perf_event *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) fetch_mmaped_event(u64 head, size_t mmap_size, char *buf, bool needs_swap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 	return prefetch_event(buf, head, mmap_size, needs_swap, ERR_PTR(-EINVAL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) static union perf_event *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) fetch_decomp_event(u64 head, size_t mmap_size, char *buf, bool needs_swap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 	return prefetch_event(buf, head, mmap_size, needs_swap, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) static int __perf_session__process_decomp_events(struct perf_session *session)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 	s64 skip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 	u64 size, file_pos = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 	struct decomp *decomp = session->decomp_last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 	if (!decomp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 	while (decomp->head < decomp->size && !session_done()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 		union perf_event *event = fetch_decomp_event(decomp->head, decomp->size, decomp->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 							     session->header.needs_swap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 		if (!event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 		size = event->header.size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 		if (size < sizeof(struct perf_event_header) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 		    (skip = perf_session__process_event(session, event, file_pos)) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 			pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 				decomp->file_pos + decomp->head, event->header.size, event->header.type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 		if (skip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 			size += skip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 		decomp->head += size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131)  * On 64bit we can mmap the data file in one go. No need for tiny mmap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132)  * slices. On 32bit we use 32MB.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) #if BITS_PER_LONG == 64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) #define MMAP_SIZE ULLONG_MAX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) #define NUM_MMAPS 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) #define MMAP_SIZE (32 * 1024 * 1024ULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) #define NUM_MMAPS 128
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) struct reader;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) typedef s64 (*reader_cb_t)(struct perf_session *session,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 			   union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 			   u64 file_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) struct reader {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 	int		 fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 	u64		 data_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 	u64		 data_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) 	reader_cb_t	 process;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) reader__process_events(struct reader *rd, struct perf_session *session,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 		       struct ui_progress *prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 	u64 data_size = rd->data_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 	u64 head, page_offset, file_offset, file_pos, size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 	int err = 0, mmap_prot, mmap_flags, map_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 	size_t	mmap_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 	char *buf, *mmaps[NUM_MMAPS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 	union perf_event *event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 	s64 skip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 	page_offset = page_size * (rd->data_offset / page_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 	file_offset = page_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 	head = rd->data_offset - page_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 	ui_progress__init_size(prog, data_size, "Processing events...");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 	data_size += rd->data_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 	mmap_size = MMAP_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 	if (mmap_size > data_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 		mmap_size = data_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 		session->one_mmap = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 	memset(mmaps, 0, sizeof(mmaps));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 	mmap_prot  = PROT_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 	mmap_flags = MAP_SHARED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 	if (session->header.needs_swap) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 		mmap_prot  |= PROT_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 		mmap_flags = MAP_PRIVATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) remap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 	buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, rd->fd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 		   file_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 	if (buf == MAP_FAILED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 		pr_err("failed to mmap file\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 		err = -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 	mmaps[map_idx] = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 	file_pos = file_offset + head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 	if (session->one_mmap) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 		session->one_mmap_addr = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 		session->one_mmap_offset = file_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) more:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 	event = fetch_mmaped_event(head, mmap_size, buf, session->header.needs_swap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 	if (IS_ERR(event))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 		return PTR_ERR(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 	if (!event) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 		if (mmaps[map_idx]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 			munmap(mmaps[map_idx], mmap_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 			mmaps[map_idx] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 		page_offset = page_size * (head / page_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 		file_offset += page_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 		head -= page_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 		goto remap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 	size = event->header.size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) 	skip = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) 	if (size < sizeof(struct perf_event_header) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) 	    (skip = rd->process(session, event, file_pos)) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d [%s]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) 		       file_offset + head, event->header.size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 		       event->header.type, strerror(-skip));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 		err = skip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 	if (skip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 		size += skip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 	head += size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 	file_pos += size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 	err = __perf_session__process_decomp_events(session);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 	ui_progress__update(prog, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 	if (session_done())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 	if (file_pos < data_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 		goto more;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) static s64 process_simple(struct perf_session *session,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 			  union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) 			  u64 file_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 	return perf_session__process_event(session, event, file_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) static int __perf_session__process_events(struct perf_session *session)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) 	struct reader rd = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 		.fd		= perf_data__fd(session->data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) 		.data_size	= session->header.data_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 		.data_offset	= session->header.data_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) 		.process	= process_simple,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 	struct ordered_events *oe = &session->ordered_events;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 	struct perf_tool *tool = session->tool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 	struct ui_progress prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 	perf_tool__fill_defaults(tool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 	if (rd.data_size == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 	ui_progress__init_size(&prog, rd.data_size, "Processing events...");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 	err = reader__process_events(&rd, session, &prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 		goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 	/* do the final flush for ordered samples */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 	err = ordered_events__flush(oe, OE_FLUSH__FINAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 		goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 	err = auxtrace__flush_events(session, tool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 		goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 	err = perf_session__flush_thread_stacks(session);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) out_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) 	ui_progress__finish();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 	if (!tool->no_warn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 		perf_session__warn_about_errors(session);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) 	 * We may switching perf.data output, make ordered_events
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) 	 * reusable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) 	ordered_events__reinit(&session->ordered_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) 	auxtrace__free_events(session);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) 	session->one_mmap = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) int perf_session__process_events(struct perf_session *session)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 	if (perf_session__register_idle_thread(session) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 	if (perf_data__is_pipe(session->data))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) 		return __perf_session__process_pipe_events(session);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) 	return __perf_session__process_events(session);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) bool perf_session__has_traces(struct perf_session *session, const char *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) 	struct evsel *evsel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) 	evlist__for_each_entry(session->evlist, evsel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) 		if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 	pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name, u64 addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) 	char *bracket;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 	struct ref_reloc_sym *ref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 	struct kmap *kmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 	ref = zalloc(sizeof(struct ref_reloc_sym));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) 	if (ref == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) 	ref->name = strdup(symbol_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) 	if (ref->name == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) 		free(ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) 	bracket = strchr(ref->name, ']');
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) 	if (bracket)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) 		*bracket = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) 	ref->addr = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) 	kmap = map__kmap(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 	if (kmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) 		kmap->ref_reloc_sym = ref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) 	return machines__fprintf_dsos(&session->machines, fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) 					  bool (skip)(struct dso *dso, int parm), int parm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) 	return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) 	size_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) 	const char *msg = "";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) 	if (perf_header__has_feat(&session->header, HEADER_AUXTRACE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) 		msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) 	ret = fprintf(fp, "\nAggregated stats:%s\n", msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) 	ret += events_stats__fprintf(&session->evlist->stats, fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) 	 * FIXME: Here we have to actually print all the machines in this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) 	 * session, not just the host...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) 	return machine__fprintf(&session->machines.host, fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) struct evsel *perf_session__find_first_evtype(struct perf_session *session,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) 					      unsigned int type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) 	struct evsel *pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) 	evlist__for_each_entry(session->evlist, pos) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) 		if (pos->core.attr.type == type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) 			return pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) int perf_session__cpu_bitmap(struct perf_session *session,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) 			     const char *cpu_list, unsigned long *cpu_bitmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) 	int i, err = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) 	struct perf_cpu_map *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) 	int nr_cpus = min(session->header.env.nr_cpus_avail, MAX_NR_CPUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) 	for (i = 0; i < PERF_TYPE_MAX; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) 		struct evsel *evsel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) 		evsel = perf_session__find_first_evtype(session, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) 		if (!evsel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) 		if (!(evsel->core.attr.sample_type & PERF_SAMPLE_CPU)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) 			pr_err("File does not contain CPU events. "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) 			       "Remove -C option to proceed.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) 			return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) 	map = perf_cpu_map__new(cpu_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) 	if (map == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) 		pr_err("Invalid cpu_list\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) 	for (i = 0; i < map->nr; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) 		int cpu = map->map[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) 		if (cpu >= nr_cpus) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) 			pr_err("Requested CPU %d too large. "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) 			       "Consider raising MAX_NR_CPUS\n", cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) 			goto out_delete_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 		set_bit(cpu, cpu_bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 	err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) out_delete_map:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) 	perf_cpu_map__put(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) 				bool full)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) 	if (session == NULL || fp == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) 	fprintf(fp, "# ========\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) 	perf_header__fprintf_info(session, fp, full);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) 	fprintf(fp, "# ========\n#\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) int perf_event__process_id_index(struct perf_session *session,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) 				 union perf_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) 	struct evlist *evlist = session->evlist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) 	struct perf_record_id_index *ie = &event->id_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) 	size_t i, nr, max_nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) 	max_nr = (ie->header.size - sizeof(struct perf_record_id_index)) /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) 		 sizeof(struct id_index_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) 	nr = ie->nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) 	if (nr > max_nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) 	if (dump_trace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) 		fprintf(stdout, " nr: %zu\n", nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) 	for (i = 0; i < nr; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) 		struct id_index_entry *e = &ie->entries[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) 		struct perf_sample_id *sid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) 		if (dump_trace) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) 			fprintf(stdout,	" ... id: %"PRI_lu64, e->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) 			fprintf(stdout,	"  idx: %"PRI_lu64, e->idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) 			fprintf(stdout,	"  cpu: %"PRI_ld64, e->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) 			fprintf(stdout,	"  tid: %"PRI_ld64"\n", e->tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) 		sid = perf_evlist__id2sid(evlist, e->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) 		if (!sid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) 			return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) 		sid->idx = e->idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) 		sid->cpu = e->cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) 		sid->tid = e->tid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) }