Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

tools/perf/util/intel-pt.c — commit 8f3ce5b39 (kx, 2023-10-28):
// SPDX-License-Identifier: GPL-2.0-only
/*
 * intel_pt.c: Intel Processor Trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 */

#include <inttypes.h>
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/zalloc.h>

#include "session.h"
#include "machine.h"
#include "memswap.h"
#include "sort.h"
#include "tool.h"
#include "event.h"
#include "evlist.h"
#include "evsel.h"
#include "map.h"
#include "color.h"
#include "thread.h"
#include "thread-stack.h"
#include "symbol.h"
#include "callchain.h"
#include "dso.h"
#include "debug.h"
#include "auxtrace.h"
#include "tsc.h"
#include "intel-pt.h"
#include "config.h"
#include "util/perf_api_probe.h"
#include "util/synthetic-events.h"
#include "time-utils.h"

#include "../arch/x86/include/uapi/asm/perf_regs.h"

#include "intel-pt-decoder/intel-pt-log.h"
#include "intel-pt-decoder/intel-pt-decoder.h"
#include "intel-pt-decoder/intel-pt-insn-decoder.h"
#include "intel-pt-decoder/intel-pt-pkt-decoder.h"

#define MAX_TIMESTAMP (~0ULL)

struct range {
	u64 start;
	u64 end;
};

struct intel_pt {
	struct auxtrace auxtrace;
	struct auxtrace_queues queues;
	struct auxtrace_heap heap;
	u32 auxtrace_type;
	struct perf_session *session;
	struct machine *machine;
	struct evsel *switch_evsel;
	struct thread *unknown_thread;
	bool timeless_decoding;
	bool sampling_mode;
	bool snapshot_mode;
	bool per_cpu_mmaps;
	bool have_tsc;
	bool data_queued;
	bool est_tsc;
	bool sync_switch;
	bool mispred_all;
	bool use_thread_stack;
	bool callstack;
	unsigned int br_stack_sz;
	unsigned int br_stack_sz_plus;
	int have_sched_switch;
	u32 pmu_type;
	u64 kernel_start;
	u64 switch_ip;
	u64 ptss_ip;

	struct perf_tsc_conversion tc;
	bool cap_user_time_zero;

	struct itrace_synth_opts synth_opts;

	bool sample_instructions;
	u64 instructions_sample_type;
	u64 instructions_id;

	bool sample_branches;
	u32 branches_filter;
	u64 branches_sample_type;
	u64 branches_id;

	bool sample_transactions;
	u64 transactions_sample_type;
	u64 transactions_id;

	bool sample_ptwrites;
	u64 ptwrites_sample_type;
	u64 ptwrites_id;

	bool sample_pwr_events;
	u64 pwr_events_sample_type;
	u64 mwait_id;
	u64 pwre_id;
	u64 exstop_id;
	u64 pwrx_id;
	u64 cbr_id;

	bool sample_pebs;
	struct evsel *pebs_evsel;

	u64 tsc_bit;
	u64 mtc_bit;
	u64 mtc_freq_bits;
	u32 tsc_ctc_ratio_n;
	u32 tsc_ctc_ratio_d;
	u64 cyc_bit;
	u64 noretcomp_bit;
	unsigned max_non_turbo_ratio;
	unsigned cbr2khz;

	unsigned long num_events;

	char *filter;
	struct addr_filters filts;

	struct range *time_ranges;
	unsigned int range_cnt;

	struct ip_callchain *chain;
	struct branch_stack *br_stack;
};

enum switch_state {
	INTEL_PT_SS_NOT_TRACING,
	INTEL_PT_SS_UNKNOWN,
	INTEL_PT_SS_TRACING,
	INTEL_PT_SS_EXPECTING_SWITCH_EVENT,
	INTEL_PT_SS_EXPECTING_SWITCH_IP,
};
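/*
 * Annotation (reader's summary, not upstream text): switch_state is the
 * per-queue state machine for the "sync_switch" heuristic, which keeps
 * decoding in step with context switches; the EXPECTING_* states mean a
 * switch event or the switch ip must still be seen before the queue is
 * treated as tracing again.
 */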

struct intel_pt_queue {
	struct intel_pt *pt;
	unsigned int queue_nr;
	struct auxtrace_buffer *buffer;
	struct auxtrace_buffer *old_buffer;
	void *decoder;
	const struct intel_pt_state *state;
	struct ip_callchain *chain;
	struct branch_stack *last_branch;
	union perf_event *event_buf;
	bool on_heap;
	bool stop;
	bool step_through_buffers;
	bool use_buffer_pid_tid;
	bool sync_switch;
	pid_t pid, tid;
	int cpu;
	int switch_state;
	pid_t next_tid;
	struct thread *thread;
	bool exclude_kernel;
	bool have_sample;
	u64 time;
	u64 timestamp;
	u64 sel_timestamp;
	bool sel_start;
	unsigned int sel_idx;
	u32 flags;
	u16 insn_len;
	u64 last_insn_cnt;
	u64 ipc_insn_cnt;
	u64 ipc_cyc_cnt;
	u64 last_in_insn_cnt;
	u64 last_in_cyc_cnt;
	u64 last_br_insn_cnt;
	u64 last_br_cyc_cnt;
	unsigned int cbr_seen;
	char insn[INTEL_PT_INSN_BUF_SZ];
};

static void intel_pt_dump(struct intel_pt *pt __maybe_unused,
			  unsigned char *buf, size_t len)
{
	struct intel_pt_pkt packet;
	size_t pos = 0;
	int ret, pkt_len, i;
	char desc[INTEL_PT_PKT_DESC_MAX];
	const char *color = PERF_COLOR_BLUE;
	enum intel_pt_pkt_ctx ctx = INTEL_PT_NO_CTX;

	color_fprintf(stdout, color,
		      ". ... Intel Processor Trace data: size %zu bytes\n",
		      len);

	while (len) {
		ret = intel_pt_get_packet(buf, len, &packet, &ctx);
		if (ret > 0)
			pkt_len = ret;
		else
			pkt_len = 1;
		printf(".");
		color_fprintf(stdout, color, "  %08x: ", pos);
		for (i = 0; i < pkt_len; i++)
			color_fprintf(stdout, color, " %02x", buf[i]);
		for (; i < 16; i++)
			color_fprintf(stdout, color, "   ");
		if (ret > 0) {
			ret = intel_pt_pkt_desc(&packet, desc,
						INTEL_PT_PKT_DESC_MAX);
			if (ret > 0)
				color_fprintf(stdout, color, " %s\n", desc);
		} else {
			color_fprintf(stdout, color, " Bad packet!\n");
		}
		pos += pkt_len;
		buf += pkt_len;
		len -= pkt_len;
	}
}
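/*
 * Annotation (illustrative output, not upstream text): given the format
 * strings above, a lone 16-byte PSB packet would dump roughly as:
 *
 * . ... Intel Processor Trace data: size 16 bytes
 * .  00000000:  02 82 02 82 02 82 02 82 02 82 02 82 02 82 02 82 PSB
 *
 * (the "PSB" description comes from intel_pt_pkt_desc()).
 */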

static void intel_pt_dump_event(struct intel_pt *pt, unsigned char *buf,
				size_t len)
{
	printf(".\n");
	intel_pt_dump(pt, buf, len);
}

static void intel_pt_log_event(union perf_event *event)
{
	FILE *f = intel_pt_log_fp();

	if (!intel_pt_enable_logging || !f)
		return;

	perf_event__fprintf(event, NULL, f);
}

static void intel_pt_dump_sample(struct perf_session *session,
				 struct perf_sample *sample)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);

	printf("\n");
	intel_pt_dump(pt, sample->aux_sample.data, sample->aux_sample.size);
}

static bool intel_pt_log_events(struct intel_pt *pt, u64 tm)
{
	struct perf_time_interval *range = pt->synth_opts.ptime_range;
	int n = pt->synth_opts.range_num;

	if (pt->synth_opts.log_plus_flags & AUXTRACE_LOG_FLG_ALL_PERF_EVTS)
		return true;

	if (pt->synth_opts.log_minus_flags & AUXTRACE_LOG_FLG_ALL_PERF_EVTS)
		return false;

	/* perf_time__ranges_skip_sample does not work if time is zero */
	if (!tm)
		tm = 1;

	return !n || !perf_time__ranges_skip_sample(range, n, tm);
}

static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *a,
				   struct auxtrace_buffer *b)
{
	bool consecutive = false;
	void *start;

	start = intel_pt_find_overlap(a->data, a->size, b->data, b->size,
				      pt->have_tsc, &consecutive);
	if (!start)
		return -EINVAL;
	b->use_size = b->data + b->size - start;
	b->use_data = start;
	if (b->use_size && consecutive)
		b->consecutive = true;
	return 0;
}
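/*
 * Annotation (sketch, not upstream text): intel_pt_find_overlap() returns
 * a pointer into b->data where the data stops repeating what was already
 * seen at the end of a:
 *
 *   a:  [ .... trace bytes ........ X X X X ]
 *   b:           [ X X X X new bytes ...... ]
 *                          ^ start
 *
 * so b->use_data/b->use_size describe only the new tail of b, and
 * "consecutive" means b continues a with no gap.
 */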

static int intel_pt_get_buffer(struct intel_pt_queue *ptq,
			       struct auxtrace_buffer *buffer,
			       struct auxtrace_buffer *old_buffer,
			       struct intel_pt_buffer *b)
{
	bool might_overlap;

	if (!buffer->data) {
		int fd = perf_data__fd(ptq->pt->session->data);

		buffer->data = auxtrace_buffer__get_data(buffer, fd);
		if (!buffer->data)
			return -ENOMEM;
	}

	might_overlap = ptq->pt->snapshot_mode || ptq->pt->sampling_mode;
	if (might_overlap && !buffer->consecutive && old_buffer &&
	    intel_pt_do_fix_overlap(ptq->pt, old_buffer, buffer))
		return -ENOMEM;

	if (buffer->use_data) {
		b->len = buffer->use_size;
		b->buf = buffer->use_data;
	} else {
		b->len = buffer->size;
		b->buf = buffer->data;
	}
	b->ref_timestamp = buffer->reference;

	if (!old_buffer || (might_overlap && !buffer->consecutive)) {
		b->consecutive = false;
		b->trace_nr = buffer->buffer_nr + 1;
	} else {
		b->consecutive = true;
	}

	return 0;
}

/* Do not drop buffers with references - refer intel_pt_get_trace() */
static void intel_pt_lookahead_drop_buffer(struct intel_pt_queue *ptq,
					   struct auxtrace_buffer *buffer)
{
	if (!buffer || buffer == ptq->buffer || buffer == ptq->old_buffer)
		return;

	auxtrace_buffer__drop_data(buffer);
}

/* Must be serialized with respect to intel_pt_get_trace() */
static int intel_pt_lookahead(void *data, intel_pt_lookahead_cb_t cb,
			      void *cb_data)
{
	struct intel_pt_queue *ptq = data;
	struct auxtrace_buffer *buffer = ptq->buffer;
	struct auxtrace_buffer *old_buffer = ptq->old_buffer;
	struct auxtrace_queue *queue;
	int err = 0;

	queue = &ptq->pt->queues.queue_array[ptq->queue_nr];

	while (1) {
		struct intel_pt_buffer b = { .len = 0 };

		buffer = auxtrace_buffer__next(queue, buffer);
		if (!buffer)
			break;

		err = intel_pt_get_buffer(ptq, buffer, old_buffer, &b);
		if (err)
			break;

		if (b.len) {
			intel_pt_lookahead_drop_buffer(ptq, old_buffer);
			old_buffer = buffer;
		} else {
			intel_pt_lookahead_drop_buffer(ptq, buffer);
			continue;
		}

		err = cb(&b, cb_data);
		if (err)
			break;
	}

	if (buffer != old_buffer)
		intel_pt_lookahead_drop_buffer(ptq, buffer);
	intel_pt_lookahead_drop_buffer(ptq, old_buffer);

	return err;
}

/*
 * This function assumes data is processed sequentially only.
 * Must be serialized with respect to intel_pt_lookahead()
 */
static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data)
{
	struct intel_pt_queue *ptq = data;
	struct auxtrace_buffer *buffer = ptq->buffer;
	struct auxtrace_buffer *old_buffer = ptq->old_buffer;
	struct auxtrace_queue *queue;
	int err;

	if (ptq->stop) {
		b->len = 0;
		return 0;
	}

	queue = &ptq->pt->queues.queue_array[ptq->queue_nr];

	buffer = auxtrace_buffer__next(queue, buffer);
	if (!buffer) {
		if (old_buffer)
			auxtrace_buffer__drop_data(old_buffer);
		b->len = 0;
		return 0;
	}

	ptq->buffer = buffer;

	err = intel_pt_get_buffer(ptq, buffer, old_buffer, b);
	if (err)
		return err;

	if (ptq->step_through_buffers)
		ptq->stop = true;

	if (b->len) {
		if (old_buffer)
			auxtrace_buffer__drop_data(old_buffer);
		ptq->old_buffer = buffer;
	} else {
		auxtrace_buffer__drop_data(buffer);
		return intel_pt_get_trace(b, data);
	}

	return 0;
}
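/*
 * Annotation (reading of the code above, not upstream text): when overlap
 * trimming leaves a buffer empty, intel_pt_get_trace() drops it and
 * tail-recurses to the next one, so the decoder only sees b->len == 0 at
 * the true end of the queue or once ptq->stop is set.
 */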

struct intel_pt_cache_entry {
	struct auxtrace_cache_entry	entry;
	u64				insn_cnt;
	u64				byte_cnt;
	enum intel_pt_insn_op		op;
	enum intel_pt_insn_branch	branch;
	int				length;
	int32_t				rel;
	char				insn[INTEL_PT_INSN_BUF_SZ];
};

static int intel_pt_config_div(const char *var, const char *value, void *data)
{
	int *d = data;
	long val;

	if (!strcmp(var, "intel-pt.cache-divisor")) {
		val = strtol(value, NULL, 0);
		if (val > 0 && val <= INT_MAX)
			*d = val;
	}

	return 0;
}

static int intel_pt_cache_divisor(void)
{
	static int d;

	if (d)
		return d;

	perf_config(intel_pt_config_div, &d);

	if (!d)
		d = 64;

	return d;
}
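/*
 * Annotation (usage sketch, not upstream text): the divisor is read from
 * perfconfig, so it can be tuned with e.g.
 *
 *   perf config intel-pt.cache-divisor=32
 *
 * which writes the [intel-pt] cache-divisor entry; the default is 64.
 */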

static unsigned int intel_pt_cache_size(struct dso *dso,
					struct machine *machine)
{
	off_t size;

	size = dso__data_size(dso, machine);
	size /= intel_pt_cache_divisor();
	if (size < 1000)
		return 10;
	if (size > (1 << 21))
		return 21;
	return 32 - __builtin_clz(size);
}
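/*
 * Annotation (worked example, not upstream text): the return value is a
 * cache size in bits, roughly log2(dso size / divisor) + 1. With the
 * default divisor of 64, an 8 MiB DSO gives size = 131072 = 2^17, hence
 * 32 - clz(2^17) = 18 bits; tiny DSOs floor at 10 bits and anything over
 * 2 MiB after division is capped near 21 bits.
 */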

static struct auxtrace_cache *intel_pt_cache(struct dso *dso,
					     struct machine *machine)
{
	struct auxtrace_cache *c;
	unsigned int bits;

	if (dso->auxtrace_cache)
		return dso->auxtrace_cache;

	bits = intel_pt_cache_size(dso, machine);

	/* Ignoring cache creation failure */
	c = auxtrace_cache__new(bits, sizeof(struct intel_pt_cache_entry), 200);

	dso->auxtrace_cache = c;

	return c;
}

static int intel_pt_cache_add(struct dso *dso, struct machine *machine,
			      u64 offset, u64 insn_cnt, u64 byte_cnt,
			      struct intel_pt_insn *intel_pt_insn)
{
	struct auxtrace_cache *c = intel_pt_cache(dso, machine);
	struct intel_pt_cache_entry *e;
	int err;

	if (!c)
		return -ENOMEM;

	e = auxtrace_cache__alloc_entry(c);
	if (!e)
		return -ENOMEM;

	e->insn_cnt = insn_cnt;
	e->byte_cnt = byte_cnt;
	e->op = intel_pt_insn->op;
	e->branch = intel_pt_insn->branch;
	e->length = intel_pt_insn->length;
	e->rel = intel_pt_insn->rel;
	memcpy(e->insn, intel_pt_insn->buf, INTEL_PT_INSN_BUF_SZ);

	err = auxtrace_cache__add(c, offset, &e->entry);
	if (err)
		auxtrace_cache__free_entry(c, e);

	return err;
}

static struct intel_pt_cache_entry *
intel_pt_cache_lookup(struct dso *dso, struct machine *machine, u64 offset)
{
	struct auxtrace_cache *c = intel_pt_cache(dso, machine);

	if (!c)
		return NULL;

	return auxtrace_cache__lookup(dso->auxtrace_cache, offset);
}

static void intel_pt_cache_invalidate(struct dso *dso, struct machine *machine,
				      u64 offset)
{
	struct auxtrace_cache *c = intel_pt_cache(dso, machine);

	if (!c)
		return;

	auxtrace_cache__remove(dso->auxtrace_cache, offset);
}

static inline u8 intel_pt_cpumode(struct intel_pt *pt, uint64_t ip)
{
	return ip >= pt->kernel_start ?
	       PERF_RECORD_MISC_KERNEL :
	       PERF_RECORD_MISC_USER;
}

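/*
 * Annotation (reader's summary, not upstream text): this is the decoder's
 * instruction-walk callback. Starting at *ip it decodes instructions from
 * the DSO bytes until it hits a branch, reaches to_ip, or exceeds
 * max_insn_cnt, returning the count in *insn_cnt_ptr; walks that stay
 * within one map are cached per-DSO, keyed by the starting offset.
 */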
static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn,
				   uint64_t *insn_cnt_ptr, uint64_t *ip,
				   uint64_t to_ip, uint64_t max_insn_cnt,
				   void *data)
{
	struct intel_pt_queue *ptq = data;
	struct machine *machine = ptq->pt->machine;
	struct thread *thread;
	struct addr_location al;
	unsigned char buf[INTEL_PT_INSN_BUF_SZ];
	ssize_t len;
	int x86_64;
	u8 cpumode;
	u64 offset, start_offset, start_ip;
	u64 insn_cnt = 0;
	bool one_map = true;

	intel_pt_insn->length = 0;

	if (to_ip && *ip == to_ip)
		goto out_no_cache;

	cpumode = intel_pt_cpumode(ptq->pt, *ip);

	thread = ptq->thread;
	if (!thread) {
		if (cpumode != PERF_RECORD_MISC_KERNEL)
			return -EINVAL;
		thread = ptq->pt->unknown_thread;
	}

	while (1) {
		if (!thread__find_map(thread, cpumode, *ip, &al) || !al.map->dso)
			return -EINVAL;

		if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR &&
		    dso__data_status_seen(al.map->dso,
					  DSO_DATA_STATUS_SEEN_ITRACE))
			return -ENOENT;

		offset = al.map->map_ip(al.map, *ip);

		if (!to_ip && one_map) {
			struct intel_pt_cache_entry *e;

			e = intel_pt_cache_lookup(al.map->dso, machine, offset);
			if (e &&
			    (!max_insn_cnt || e->insn_cnt <= max_insn_cnt)) {
				*insn_cnt_ptr = e->insn_cnt;
				*ip += e->byte_cnt;
				intel_pt_insn->op = e->op;
				intel_pt_insn->branch = e->branch;
				intel_pt_insn->length = e->length;
				intel_pt_insn->rel = e->rel;
				memcpy(intel_pt_insn->buf, e->insn,
				       INTEL_PT_INSN_BUF_SZ);
				intel_pt_log_insn_no_data(intel_pt_insn, *ip);
				return 0;
			}
		}

		start_offset = offset;
		start_ip = *ip;

		/* Load maps to ensure dso->is_64_bit has been updated */
		map__load(al.map);

		x86_64 = al.map->dso->is_64_bit;

		while (1) {
			len = dso__data_read_offset(al.map->dso, machine,
						    offset, buf,
						    INTEL_PT_INSN_BUF_SZ);
			if (len <= 0)
				return -EINVAL;

			if (intel_pt_get_insn(buf, len, x86_64, intel_pt_insn))
				return -EINVAL;

			intel_pt_log_insn(intel_pt_insn, *ip);

			insn_cnt += 1;

			if (intel_pt_insn->branch != INTEL_PT_BR_NO_BRANCH)
				goto out;

			if (max_insn_cnt && insn_cnt >= max_insn_cnt)
				goto out_no_cache;

			*ip += intel_pt_insn->length;

			if (to_ip && *ip == to_ip) {
				intel_pt_insn->length = 0;
				goto out_no_cache;
			}

			if (*ip >= al.map->end)
				break;

			offset += intel_pt_insn->length;
		}
		one_map = false;
	}
out:
	*insn_cnt_ptr = insn_cnt;

	if (!one_map)
		goto out_no_cache;

	/*
	 * Didn't lookup in the 'to_ip' case, so do it now to prevent duplicate
	 * entries.
	 */
	if (to_ip) {
		struct intel_pt_cache_entry *e;

		e = intel_pt_cache_lookup(al.map->dso, machine, start_offset);
		if (e)
			return 0;
	}

	/* Ignore cache errors */
	intel_pt_cache_add(al.map->dso, machine, start_offset, insn_cnt,
			   *ip - start_ip, intel_pt_insn);

	return 0;

out_no_cache:
	*insn_cnt_ptr = insn_cnt;
	return 0;
}

static bool intel_pt_match_pgd_ip(struct intel_pt *pt, uint64_t ip,
				  uint64_t offset, const char *filename)
{
	struct addr_filter *filt;
	bool have_filter   = false;
	bool hit_tracestop = false;
	bool hit_filter    = false;

	list_for_each_entry(filt, &pt->filts.head, list) {
		if (filt->start)
			have_filter = true;

		if ((filename && !filt->filename) ||
		    (!filename && filt->filename) ||
		    (filename && strcmp(filename, filt->filename)))
			continue;

		if (!(offset >= filt->addr && offset < filt->addr + filt->size))
			continue;

		intel_pt_log("TIP.PGD ip %#"PRIx64" offset %#"PRIx64" in %s hit filter: %s offset %#"PRIx64" size %#"PRIx64"\n",
			     ip, offset, filename ? filename : "[kernel]",
			     filt->start ? "filter" : "stop",
			     filt->addr, filt->size);

		if (filt->start)
			hit_filter = true;
		else
			hit_tracestop = true;
	}

	if (!hit_tracestop && !hit_filter)
		intel_pt_log("TIP.PGD ip %#"PRIx64" offset %#"PRIx64" in %s is not in a filter region\n",
			     ip, offset, filename ? filename : "[kernel]");

	return hit_tracestop || (have_filter && !hit_filter);
}

static int __intel_pt_pgd_ip(uint64_t ip, void *data)
{
	struct intel_pt_queue *ptq = data;
	struct thread *thread;
	struct addr_location al;
	u8 cpumode;
	u64 offset;

	if (ip >= ptq->pt->kernel_start)
		return intel_pt_match_pgd_ip(ptq->pt, ip, ip, NULL);

	cpumode = PERF_RECORD_MISC_USER;

	thread = ptq->thread;
	if (!thread)
		return -EINVAL;

	if (!thread__find_map(thread, cpumode, ip, &al) || !al.map->dso)
		return -EINVAL;

	offset = al.map->map_ip(al.map, ip);

	return intel_pt_match_pgd_ip(ptq->pt, ip, offset,
				     al.map->dso->long_name);
}

static bool intel_pt_pgd_ip(uint64_t ip, void *data)
{
	return __intel_pt_pgd_ip(ip, data) > 0;
}
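/*
 * Annotation (reading of the code above): __intel_pt_pgd_ip() returns > 0
 * when the ip hit a trace-stop region or, with filters configured, fell
 * outside all of them, and a negative errno when the ip cannot be
 * resolved; intel_pt_pgd_ip() deliberately folds errors into "false".
 */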

static bool intel_pt_get_config(struct intel_pt *pt,
				struct perf_event_attr *attr, u64 *config)
{
	if (attr->type == pt->pmu_type) {
		if (config)
			*config = attr->config;
		return true;
	}

	return false;
}

static bool intel_pt_exclude_kernel(struct intel_pt *pt)
{
	struct evsel *evsel;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, NULL) &&
		    !evsel->core.attr.exclude_kernel)
			return false;
	}
	return true;
}

static bool intel_pt_return_compression(struct intel_pt *pt)
{
	struct evsel *evsel;
	u64 config;

	if (!pt->noretcomp_bit)
		return true;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, &config) &&
		    (config & pt->noretcomp_bit))
			return false;
	}
	return true;
}

static bool intel_pt_branch_enable(struct intel_pt *pt)
{
	struct evsel *evsel;
	u64 config;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, &config) &&
		    (config & 1) && !(config & 0x2000))
			return false;
	}
	return true;
}
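/*
 * Annotation (assumption, not upstream text): the magic numbers above
 * appear to match the intel_pt PMU format (bit 0 = "pt" enable, bit 13 =
 * 0x2000 = BranchEn), making the check "PT enabled but branch tracing
 * disabled".
 */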

static unsigned int intel_pt_mtc_period(struct intel_pt *pt)
{
	struct evsel *evsel;
	unsigned int shift;
	u64 config;

	if (!pt->mtc_freq_bits)
		return 0;

	for (shift = 0, config = pt->mtc_freq_bits; !(config & 1); shift++)
		config >>= 1;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, &config))
			return (config & pt->mtc_freq_bits) >> shift;
	}
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 
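/*
 * "Timeless" decoding ignores timestamps. It is used when TSC packets
 * were not enabled, perf time conversion is unavailable, or the events
 * do not sample time.
 */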
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) static bool intel_pt_timeless_decoding(struct intel_pt *pt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	struct evsel *evsel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	bool timeless_decoding = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	u64 config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	if (!pt->tsc_bit || !pt->cap_user_time_zero)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	evlist__for_each_entry(pt->session->evlist, evsel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 		if (!(evsel->core.attr.sample_type & PERF_SAMPLE_TIME))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 		if (intel_pt_get_config(pt, &evsel->core.attr, &config)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 			if (config & pt->tsc_bit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 				timeless_decoding = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 				return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	return timeless_decoding;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) static bool intel_pt_tracing_kernel(struct intel_pt *pt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	struct evsel *evsel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	evlist__for_each_entry(pt->session->evlist, evsel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 		if (intel_pt_get_config(pt, &evsel->core.attr, NULL) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 		    !evsel->core.attr.exclude_kernel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) static bool intel_pt_have_tsc(struct intel_pt *pt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	struct evsel *evsel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	bool have_tsc = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	u64 config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	if (!pt->tsc_bit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	evlist__for_each_entry(pt->session->evlist, evsel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 		if (intel_pt_get_config(pt, &evsel->core.attr, &config)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 			if (config & pt->tsc_bit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 				have_tsc = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 				return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	return have_tsc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 
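/*
 * Sampling mode means the PT data arrived as PERF_SAMPLE_AUX attached to
 * other events, rather than being streamed through the AUX area mmap.
 */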
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) static bool intel_pt_sampling_mode(struct intel_pt *pt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	struct evsel *evsel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	evlist__for_each_entry(pt->session->evlist, evsel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 		if ((evsel->core.attr.sample_type & PERF_SAMPLE_AUX) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 		    evsel->core.attr.aux_sample_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) static u64 intel_pt_ctl(struct intel_pt *pt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	struct evsel *evsel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	u64 config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	evlist__for_each_entry(pt->session->evlist, evsel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 		if (intel_pt_get_config(pt, &evsel->core.attr, &config))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 			return config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 
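/*
 * Inverse of the perf time conversion, i.e. ticks = (ns << time_shift) /
 * time_mult. The quotient/remainder split stops the intermediate shift
 * from overflowing 64 bits for large ns.
 */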
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) static u64 intel_pt_ns_to_ticks(const struct intel_pt *pt, u64 ns)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	u64 quot, rem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	quot = ns / pt->tc.time_mult;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	rem  = ns % pt->tc.time_mult;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	return (quot << pt->tc.time_shift) + (rem << pt->tc.time_shift) /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 		pt->tc.time_mult;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) static struct ip_callchain *intel_pt_alloc_chain(struct intel_pt *pt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	size_t sz = sizeof(struct ip_callchain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	/* Add 1 to callchain_sz for callchain context */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	sz += (pt->synth_opts.callchain_sz + 1) * sizeof(u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	return zalloc(sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) static int intel_pt_callchain_init(struct intel_pt *pt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	struct evsel *evsel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	evlist__for_each_entry(pt->session->evlist, evsel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 		if (!(evsel->core.attr.sample_type & PERF_SAMPLE_CALLCHAIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 			evsel->synth_sample_type |= PERF_SAMPLE_CALLCHAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	pt->chain = intel_pt_alloc_chain(pt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	if (!pt->chain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) static void intel_pt_add_callchain(struct intel_pt *pt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 				   struct perf_sample *sample)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	struct thread *thread = machine__findnew_thread(pt->machine,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 							sample->pid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 							sample->tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	thread_stack__sample_late(thread, sample->cpu, pt->chain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 				  pt->synth_opts.callchain_sz + 1, sample->ip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 				  pt->kernel_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	sample->callchain = pt->chain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) static struct branch_stack *intel_pt_alloc_br_stack(unsigned int entry_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	size_t sz = sizeof(struct branch_stack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	sz += entry_cnt * sizeof(struct branch_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	return zalloc(sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) static int intel_pt_br_stack_init(struct intel_pt *pt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	struct evsel *evsel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	evlist__for_each_entry(pt->session->evlist, evsel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 		if (!(evsel->core.attr.sample_type & PERF_SAMPLE_BRANCH_STACK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 			evsel->synth_sample_type |= PERF_SAMPLE_BRANCH_STACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	pt->br_stack = intel_pt_alloc_br_stack(pt->br_stack_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	if (!pt->br_stack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) static void intel_pt_add_br_stack(struct intel_pt *pt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 				  struct perf_sample *sample)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	struct thread *thread = machine__findnew_thread(pt->machine,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 							sample->pid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 							sample->tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	thread_stack__br_sample_late(thread, sample->cpu, pt->br_stack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 				     pt->br_stack_sz, sample->ip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 				     pt->kernel_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	sample->branch_stack = pt->br_stack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) /* INTEL_PT_LBR_0, INTEL_PT_LBR_1 and INTEL_PT_LBR_2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) #define LBRS_MAX (INTEL_PT_BLK_ITEM_ID_CNT * 3U)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 
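/*
 * Allocate one decode queue per auxtrace queue (i.e. per CPU, or per
 * thread when tracing per-thread), each with its own decoder instance
 * and sample scratch buffers.
 */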
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 						   unsigned int queue_nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	struct intel_pt_params params = { .get_trace = 0, };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	struct perf_env *env = pt->machine->env;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	struct intel_pt_queue *ptq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	ptq = zalloc(sizeof(struct intel_pt_queue));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	if (!ptq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	if (pt->synth_opts.callchain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 		ptq->chain = intel_pt_alloc_chain(pt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 		if (!ptq->chain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 			goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	if (pt->synth_opts.last_branch || pt->synth_opts.other_events) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 		unsigned int entry_cnt = max(LBRS_MAX, pt->br_stack_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 		ptq->last_branch = intel_pt_alloc_br_stack(entry_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 		if (!ptq->last_branch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 			goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 	ptq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 	if (!ptq->event_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 		goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	ptq->pt = pt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	ptq->queue_nr = queue_nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	ptq->exclude_kernel = intel_pt_exclude_kernel(pt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	ptq->pid = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	ptq->tid = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	ptq->cpu = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 	ptq->next_tid = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	params.get_trace = intel_pt_get_trace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 	params.walk_insn = intel_pt_walk_next_insn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	params.lookahead = intel_pt_lookahead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	params.data = ptq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	params.return_compression = intel_pt_return_compression(pt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	params.branch_enable = intel_pt_branch_enable(pt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	params.ctl = intel_pt_ctl(pt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	params.max_non_turbo_ratio = pt->max_non_turbo_ratio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	params.mtc_period = intel_pt_mtc_period(pt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	params.tsc_ctc_ratio_n = pt->tsc_ctc_ratio_n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	params.tsc_ctc_ratio_d = pt->tsc_ctc_ratio_d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	params.quick = pt->synth_opts.quick;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	if (pt->filts.cnt > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 		params.pgd_ip = intel_pt_pgd_ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 	if (pt->synth_opts.instructions) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 		if (pt->synth_opts.period) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 			switch (pt->synth_opts.period_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 			case PERF_ITRACE_PERIOD_INSTRUCTIONS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 				params.period_type =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 						INTEL_PT_PERIOD_INSTRUCTIONS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 				params.period = pt->synth_opts.period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 			case PERF_ITRACE_PERIOD_TICKS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 				params.period_type = INTEL_PT_PERIOD_TICKS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 				params.period = pt->synth_opts.period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 			case PERF_ITRACE_PERIOD_NANOSECS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 				params.period_type = INTEL_PT_PERIOD_TICKS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 				params.period = intel_pt_ns_to_ticks(pt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 							pt->synth_opts.period);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 			default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 		if (!params.period) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 			params.period_type = INTEL_PT_PERIOD_INSTRUCTIONS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 			params.period = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 
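	/*
	 * cpuid 6/92 is Goldmont-based Atom; this presumably accommodates
	 * the erratum whereby FUP packets carry the next linear instruction
	 * pointer (NLIP) instead of the current IP.
	 */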
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	if (env->cpuid && !strncmp(env->cpuid, "GenuineIntel,6,92,", 18))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 		params.flags |= INTEL_PT_FUP_WITH_NLIP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	ptq->decoder = intel_pt_decoder_new(&params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	if (!ptq->decoder)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 		goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	return ptq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	zfree(&ptq->event_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	zfree(&ptq->last_branch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 	zfree(&ptq->chain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	free(ptq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) static void intel_pt_free_queue(void *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	struct intel_pt_queue *ptq = priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	if (!ptq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	thread__zput(ptq->thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	intel_pt_decoder_free(ptq->decoder);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	zfree(&ptq->event_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	zfree(&ptq->last_branch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	zfree(&ptq->chain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	free(ptq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 
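/*
 * Refresh a queue's pid/tid/cpu. With per-cpu tracing (tid == -1), or when
 * sched_switch events drive context tracking, the tid currently running on
 * the queue's CPU is taken from the machine state.
 */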
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) static void intel_pt_set_pid_tid_cpu(struct intel_pt *pt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 				     struct auxtrace_queue *queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 	struct intel_pt_queue *ptq = queue->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	if (queue->tid == -1 || pt->have_sched_switch) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 		ptq->tid = machine__get_current_tid(pt->machine, ptq->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 		if (ptq->tid == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 			ptq->pid = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 		thread__zput(ptq->thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	if (!ptq->thread && ptq->tid != -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 		ptq->thread = machine__find_thread(pt->machine, -1, ptq->tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	if (ptq->thread) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 		ptq->pid = ptq->thread->pid_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 		if (queue->cpu == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 			ptq->cpu = ptq->thread->cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 
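/*
 * Translate the decoder's state flags into perf sample flags
 * (PERF_IP_FLAG_*) and latch the instruction bytes and length.
 */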
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) static void intel_pt_sample_flags(struct intel_pt_queue *ptq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	ptq->insn_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	if (ptq->state->flags & INTEL_PT_ABORT_TX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 		ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TX_ABORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	} else if (ptq->state->flags & INTEL_PT_ASYNC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 		if (ptq->state->to_ip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 			ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 				     PERF_IP_FLAG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 				     PERF_IP_FLAG_INTERRUPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 			ptq->flags = PERF_IP_FLAG_BRANCH |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 				     PERF_IP_FLAG_TRACE_END;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 		ptq->insn_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 		if (ptq->state->from_ip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 			ptq->flags = intel_pt_insn_type(ptq->state->insn_op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 			ptq->flags = PERF_IP_FLAG_BRANCH |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 				     PERF_IP_FLAG_TRACE_BEGIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 		if (ptq->state->flags & INTEL_PT_IN_TX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 			ptq->flags |= PERF_IP_FLAG_IN_TX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 		ptq->insn_len = ptq->state->insn_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 		memcpy(ptq->insn, ptq->state->insn, INTEL_PT_INSN_BUF_SZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	if (ptq->state->type & INTEL_PT_TRACE_BEGIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 		ptq->flags |= PERF_IP_FLAG_TRACE_BEGIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	if (ptq->state->type & INTEL_PT_TRACE_END)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 		ptq->flags |= PERF_IP_FLAG_TRACE_END;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) static void intel_pt_setup_time_range(struct intel_pt *pt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 				      struct intel_pt_queue *ptq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	if (!pt->range_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	ptq->sel_timestamp = pt->time_ranges[0].start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	ptq->sel_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	if (ptq->sel_timestamp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 		ptq->sel_start = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 		ptq->sel_timestamp = pt->time_ranges[0].end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 		ptq->sel_start = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 
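/*
 * Lazily create a queue's decoder state and, for timed decoding, run the
 * decoder forward to its first timestamp so the queue can be placed on
 * the heap that orders queues by time.
 */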
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) static int intel_pt_setup_queue(struct intel_pt *pt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 				struct auxtrace_queue *queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 				unsigned int queue_nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	struct intel_pt_queue *ptq = queue->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	if (list_empty(&queue->head))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	if (!ptq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 		ptq = intel_pt_alloc_queue(pt, queue_nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 		if (!ptq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 		queue->priv = ptq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 		if (queue->cpu != -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 			ptq->cpu = queue->cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 		ptq->tid = queue->tid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 		ptq->cbr_seen = UINT_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 		if (pt->sampling_mode && !pt->snapshot_mode &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 		    pt->timeless_decoding)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 			ptq->step_through_buffers = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 		ptq->sync_switch = pt->sync_switch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 		intel_pt_setup_time_range(pt, ptq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	if (!ptq->on_heap &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	    (!ptq->sync_switch ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	     ptq->switch_state != INTEL_PT_SS_EXPECTING_SWITCH_EVENT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 		const struct intel_pt_state *state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 		int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 		if (pt->timeless_decoding)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 		intel_pt_log("queue %u getting timestamp\n", queue_nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 		intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 			     queue_nr, ptq->cpu, ptq->pid, ptq->tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 		if (ptq->sel_start && ptq->sel_timestamp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 			ret = intel_pt_fast_forward(ptq->decoder,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 						    ptq->sel_timestamp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 			if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 				return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 		while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 			state = intel_pt_decode(ptq->decoder);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 			if (state->err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 				if (state->err == INTEL_PT_ERR_NODATA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 					intel_pt_log("queue %u has no timestamp\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 						     queue_nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 					return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 			if (state->timestamp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 		ptq->timestamp = state->timestamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 		intel_pt_log("queue %u timestamp 0x%" PRIx64 "\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 			     queue_nr, ptq->timestamp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 		ptq->state = state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 		ptq->have_sample = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 		if (ptq->sel_start && ptq->sel_timestamp &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 		    ptq->timestamp < ptq->sel_timestamp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 			ptq->have_sample = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 		intel_pt_sample_flags(ptq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 		ret = auxtrace_heap__add(&pt->heap, queue_nr, ptq->timestamp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 		ptq->on_heap = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) static int intel_pt_setup_queues(struct intel_pt *pt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	for (i = 0; i < pt->queues.nr_queues; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 		ret = intel_pt_setup_queue(pt, &pt->queues.queue_array[i], i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) static inline bool intel_pt_skip_event(struct intel_pt *pt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	return pt->synth_opts.initial_skip &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	       pt->num_events++ < pt->synth_opts.initial_skip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285)  * Cannot count CBR as skipped because it won't go away until cbr == cbr_seen.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)  * Also ensure CBR is first non-skipped event by allowing for 4 more samples
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)  * from this decoder state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) static inline bool intel_pt_skip_cbr_event(struct intel_pt *pt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 	return pt->synth_opts.initial_skip &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	       pt->num_events + 4 < pt->synth_opts.initial_skip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 
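/*
 * Sample preparation is layered: the _a_ variant fills fields common to
 * all synthesized samples, _b_ adds branch and time fields on top, and
 * _p_ (below) adjusts power-event style samples.
 */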
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) static void intel_pt_prep_a_sample(struct intel_pt_queue *ptq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 				   union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 				   struct perf_sample *sample)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	event->sample.header.type = PERF_RECORD_SAMPLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	event->sample.header.size = sizeof(struct perf_event_header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	sample->pid = ptq->pid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	sample->tid = ptq->tid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	sample->cpu = ptq->cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	sample->insn_len = ptq->insn_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	memcpy(sample->insn, ptq->insn, INTEL_PT_INSN_BUF_SZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) static void intel_pt_prep_b_sample(struct intel_pt *pt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 				   struct intel_pt_queue *ptq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 				   union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 				   struct perf_sample *sample)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 	intel_pt_prep_a_sample(ptq, event, sample);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	if (!pt->timeless_decoding)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 		sample->time = tsc_to_perf_time(ptq->timestamp, &pt->tc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	sample->ip = ptq->state->from_ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 	sample->cpumode = intel_pt_cpumode(pt, sample->ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 	sample->addr = ptq->state->to_ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	sample->period = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 	sample->flags = ptq->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 	event->sample.header.misc = sample->cpumode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) static int intel_pt_inject_event(union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 				 struct perf_sample *sample, u64 type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	event->header.size = perf_event__sample_event_size(sample, type, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 	return perf_event__synthesize_sample(event, type, 0, sample);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) static inline int intel_pt_opt_inject(struct intel_pt *pt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 				      union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 				      struct perf_sample *sample, u64 type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 	if (!pt->synth_opts.inject)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 	return intel_pt_inject_event(event, sample, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) static int intel_pt_deliver_synth_event(struct intel_pt *pt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 					union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 					struct perf_sample *sample, u64 type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	ret = intel_pt_opt_inject(pt, event, sample, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	ret = perf_session__deliver_synth_event(pt->session, event, sample);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 		pr_err("Intel PT: failed to deliver event, error %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	struct intel_pt *pt = ptq->pt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 	union perf_event *event = ptq->event_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 	struct perf_sample sample = { .ip = 0, };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 	struct dummy_branch_stack {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 		u64			nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 		u64			hw_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 		struct branch_entry	entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 	} dummy_bs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 	if (pt->branches_filter && !(pt->branches_filter & ptq->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 	if (intel_pt_skip_event(pt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 	intel_pt_prep_b_sample(pt, ptq, event, &sample);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 	sample.id = ptq->pt->branches_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 	sample.stream_id = ptq->pt->branches_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 	 * perf report cannot handle events without a branch stack when using
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 	 * SORT_MODE__BRANCH so make a dummy one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 	if (pt->synth_opts.last_branch && sort__mode == SORT_MODE__BRANCH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 		dummy_bs = (struct dummy_branch_stack){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 			.nr = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 			.hw_idx = -1ULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 			.entries = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 				.from = sample.ip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 				.to = sample.addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 			},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 		};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 		sample.branch_stack = (struct branch_stack *)&dummy_bs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 
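	/*
	 * Optionally attach IPC information: cycle and instruction counts
	 * are deltas since the last branch sample on this queue.
	 */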
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 	if (ptq->state->flags & INTEL_PT_SAMPLE_IPC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 		sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_br_cyc_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 	if (sample.cyc_cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 		sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_br_insn_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 		ptq->last_br_insn_cnt = ptq->ipc_insn_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 		ptq->last_br_cyc_cnt = ptq->ipc_cyc_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 	return intel_pt_deliver_synth_event(pt, event, &sample,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 					    pt->branches_sample_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) static void intel_pt_prep_sample(struct intel_pt *pt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 				 struct intel_pt_queue *ptq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 				 union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 				 struct perf_sample *sample)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 	intel_pt_prep_b_sample(pt, ptq, event, sample);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 	if (pt->synth_opts.callchain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 		thread_stack__sample(ptq->thread, ptq->cpu, ptq->chain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 				     pt->synth_opts.callchain_sz + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 				     sample->ip, pt->kernel_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 		sample->callchain = ptq->chain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 	if (pt->synth_opts.last_branch) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 		thread_stack__br_sample(ptq->thread, ptq->cpu, ptq->last_branch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 					pt->br_stack_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 		sample->branch_stack = ptq->last_branch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) static int intel_pt_synth_instruction_sample(struct intel_pt_queue *ptq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 	struct intel_pt *pt = ptq->pt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 	union perf_event *event = ptq->event_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 	struct perf_sample sample = { .ip = 0, };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 	if (intel_pt_skip_event(pt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 	intel_pt_prep_sample(pt, ptq, event, &sample);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 	sample.id = ptq->pt->instructions_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 	sample.stream_id = ptq->pt->instructions_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 	if (pt->synth_opts.quick)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 		sample.period = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 		sample.period = ptq->state->tot_insn_cnt - ptq->last_insn_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 	if (ptq->state->flags & INTEL_PT_SAMPLE_IPC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 		sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_in_cyc_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 	if (sample.cyc_cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 		sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_in_insn_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 		ptq->last_in_insn_cnt = ptq->ipc_insn_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 		ptq->last_in_cyc_cnt = ptq->ipc_cyc_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 	ptq->last_insn_cnt = ptq->state->tot_insn_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 	return intel_pt_deliver_synth_event(pt, event, &sample,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 					    pt->instructions_sample_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 	struct intel_pt *pt = ptq->pt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 	union perf_event *event = ptq->event_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 	struct perf_sample sample = { .ip = 0, };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 	if (intel_pt_skip_event(pt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 	intel_pt_prep_sample(pt, ptq, event, &sample);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 	sample.id = ptq->pt->transactions_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 	sample.stream_id = ptq->pt->transactions_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 	return intel_pt_deliver_synth_event(pt, event, &sample,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 					    pt->transactions_sample_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) static void intel_pt_prep_p_sample(struct intel_pt *pt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 				   struct intel_pt_queue *ptq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 				   union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 				   struct perf_sample *sample)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 	intel_pt_prep_sample(pt, ptq, event, sample);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 	 * Zero IP is used to mean "trace start" but that is not the case for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 	 * power or PTWRITE events with no IP, so clear the flags.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 	if (!sample->ip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 		sample->flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) static int intel_pt_synth_ptwrite_sample(struct intel_pt_queue *ptq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 	struct intel_pt *pt = ptq->pt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 	union perf_event *event = ptq->event_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 	struct perf_sample sample = { .ip = 0, };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 	struct perf_synth_intel_ptwrite raw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 	if (intel_pt_skip_event(pt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 	intel_pt_prep_p_sample(pt, ptq, event, &sample);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 	sample.id = ptq->pt->ptwrites_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 	sample.stream_id = ptq->pt->ptwrites_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 	raw.flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 	raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 	raw.payload = cpu_to_le64(ptq->state->ptw_payload);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 	sample.raw_size = perf_synth__raw_size(raw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 	sample.raw_data = perf_synth__raw_data(&raw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 	return intel_pt_deliver_synth_event(pt, event, &sample,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 					    pt->ptwrites_sample_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 
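/*
 * CBR (core-to-bus ratio) samples report core frequency changes; cbr_seen
 * records the last ratio sampled so the same change is not re-reported,
 * and cbr2khz scales the ratio to a frequency.
 */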
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) static int intel_pt_synth_cbr_sample(struct intel_pt_queue *ptq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 	struct intel_pt *pt = ptq->pt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 	union perf_event *event = ptq->event_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 	struct perf_sample sample = { .ip = 0, };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 	struct perf_synth_intel_cbr raw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 	u32 flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 	if (intel_pt_skip_cbr_event(pt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 	ptq->cbr_seen = ptq->state->cbr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 	intel_pt_prep_p_sample(pt, ptq, event, &sample);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 	sample.id = ptq->pt->cbr_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 	sample.stream_id = ptq->pt->cbr_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 	flags = (u16)ptq->state->cbr_payload | (pt->max_non_turbo_ratio << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 	raw.flags = cpu_to_le32(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 	raw.freq = cpu_to_le32(raw.cbr * pt->cbr2khz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 	raw.reserved3 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 	sample.raw_size = perf_synth__raw_size(raw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 	sample.raw_data = perf_synth__raw_data(&raw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 	return intel_pt_deliver_synth_event(pt, event, &sample,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 					    pt->pwr_events_sample_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 
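/*
 * The remaining power-event synthesizers (MWAIT, PWRE, EXSTOP, PWRX)
 * follow one pattern: copy the relevant decoder state into a raw record
 * and deliver it with the power events' sample type.
 */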
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) static int intel_pt_synth_mwait_sample(struct intel_pt_queue *ptq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 	struct intel_pt *pt = ptq->pt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 	union perf_event *event = ptq->event_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 	struct perf_sample sample = { .ip = 0, };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 	struct perf_synth_intel_mwait raw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 	if (intel_pt_skip_event(pt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 	intel_pt_prep_p_sample(pt, ptq, event, &sample);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 	sample.id = ptq->pt->mwait_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 	sample.stream_id = ptq->pt->mwait_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 	raw.reserved = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 	raw.payload = cpu_to_le64(ptq->state->mwait_payload);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 	sample.raw_size = perf_synth__raw_size(raw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 	sample.raw_data = perf_synth__raw_data(&raw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 	return intel_pt_deliver_synth_event(pt, event, &sample,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 					    pt->pwr_events_sample_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) static int intel_pt_synth_pwre_sample(struct intel_pt_queue *ptq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 	struct intel_pt *pt = ptq->pt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 	union perf_event *event = ptq->event_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 	struct perf_sample sample = { .ip = 0, };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 	struct perf_synth_intel_pwre raw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 	if (intel_pt_skip_event(pt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 	intel_pt_prep_p_sample(pt, ptq, event, &sample);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 	sample.id = ptq->pt->pwre_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 	sample.stream_id = ptq->pt->pwre_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 	raw.reserved = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 	raw.payload = cpu_to_le64(ptq->state->pwre_payload);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 	sample.raw_size = perf_synth__raw_size(raw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 	sample.raw_data = perf_synth__raw_data(&raw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 	return intel_pt_deliver_synth_event(pt, event, &sample,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 					    pt->pwr_events_sample_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) static int intel_pt_synth_exstop_sample(struct intel_pt_queue *ptq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 	struct intel_pt *pt = ptq->pt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 	union perf_event *event = ptq->event_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 	struct perf_sample sample = { .ip = 0, };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 	struct perf_synth_intel_exstop raw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 	if (intel_pt_skip_event(pt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 	intel_pt_prep_p_sample(pt, ptq, event, &sample);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 	sample.id = ptq->pt->exstop_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 	sample.stream_id = ptq->pt->exstop_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 	raw.flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 	raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 	sample.raw_size = perf_synth__raw_size(raw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 	sample.raw_data = perf_synth__raw_data(&raw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 	return intel_pt_deliver_synth_event(pt, event, &sample,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 					    pt->pwr_events_sample_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 
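^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     ) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     )  * Synthesize a sample for a PWRX (power exit) packet, which reports the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     )  * wake from a C-state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     )  */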
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) static int intel_pt_synth_pwrx_sample(struct intel_pt_queue *ptq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 	struct intel_pt *pt = ptq->pt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 	union perf_event *event = ptq->event_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 	struct perf_sample sample = { .ip = 0, };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 	struct perf_synth_intel_pwrx raw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 	if (intel_pt_skip_event(pt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 	intel_pt_prep_p_sample(pt, ptq, event, &sample);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 	sample.id = ptq->pt->pwrx_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 	sample.stream_id = ptq->pt->pwrx_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 	raw.reserved = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 	raw.payload = cpu_to_le64(ptq->state->pwrx_payload);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 	sample.raw_size = perf_synth__raw_size(raw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 	sample.raw_data = perf_synth__raw_data(&raw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 	return intel_pt_deliver_synth_event(pt, event, &sample,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 					    pt->pwr_events_sample_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655)  * PEBS gp_regs array indexes, plus 1 so that 0 means "not present".
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656)  * Refer to intel_pt_add_gp_regs().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) static const int pebs_gp_regs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 	[PERF_REG_X86_FLAGS]	= 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 	[PERF_REG_X86_IP]	= 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 	[PERF_REG_X86_AX]	= 3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 	[PERF_REG_X86_CX]	= 4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 	[PERF_REG_X86_DX]	= 5,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 	[PERF_REG_X86_BX]	= 6,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 	[PERF_REG_X86_SP]	= 7,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 	[PERF_REG_X86_BP]	= 8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 	[PERF_REG_X86_SI]	= 9,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 	[PERF_REG_X86_DI]	= 10,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 	[PERF_REG_X86_R8]	= 11,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 	[PERF_REG_X86_R9]	= 12,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 	[PERF_REG_X86_R10]	= 13,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 	[PERF_REG_X86_R11]	= 14,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 	[PERF_REG_X86_R12]	= 15,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 	[PERF_REG_X86_R13]	= 16,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 	[PERF_REG_X86_R14]	= 17,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 	[PERF_REG_X86_R15]	= 18,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) };
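^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     ) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     ) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     )  * For example, pebs_gp_regs[PERF_REG_X86_IP] is 2, so the IP, when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     )  * present, is found at index 1 of the PEBS gp_regs block item.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     )  */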
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) static u64 *intel_pt_add_gp_regs(struct regs_dump *intr_regs, u64 *pos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 				 const struct intel_pt_blk_items *items,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 				 u64 regs_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 	const u64 *gp_regs = items->val[INTEL_PT_GP_REGS_POS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 	u32 mask = items->mask[INTEL_PT_GP_REGS_POS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 	u32 bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 	for (i = 0, bit = 1; i < PERF_REG_X86_64_MAX; i++, bit <<= 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 		/* Get the PEBS gp_regs array index */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 		int n = pebs_gp_regs[i] - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 		if (n < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 		 * Add only registers that were requested (i.e. 'regs_mask') and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 		 * that were provided (i.e. 'mask'), and update the resulting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 		 * mask (i.e. 'intr_regs->mask') accordingly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 		if (mask & 1 << n && regs_mask & bit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 			intr_regs->mask |= bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 			*pos++ = gp_regs[n];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 	return pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 
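^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     ) /* Older uapi headers may lack this; XMM0 is perf register 32 */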
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) #ifndef PERF_REG_X86_XMM0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) #define PERF_REG_X86_XMM0 32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) static void intel_pt_add_xmm(struct regs_dump *intr_regs, u64 *pos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 			     const struct intel_pt_blk_items *items,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 			     u64 regs_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 	u32 mask = items->has_xmm & (regs_mask >> PERF_REG_X86_XMM0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 	const u64 *xmm = items->xmm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 	 * If there are any XMM registers, then there should be all of them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 	 * Nevertheless, follow the logic to add only registers that were
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 	 * requested (i.e. 'regs_mask') and that were provided (i.e. 'mask'),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 	 * and update the resulting mask (i.e. 'intr_regs->mask') accordingly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 	intr_regs->mask |= (u64)mask << PERF_REG_X86_XMM0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 	for (; mask; mask >>= 1, xmm++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 		if (mask & 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 			*pos++ = *xmm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 
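^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     ) /* Bit layout of the LBR info word captured in the LBR block items */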
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) #define LBR_INFO_MISPRED	(1ULL << 63)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) #define LBR_INFO_IN_TX		(1ULL << 62)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) #define LBR_INFO_ABORT		(1ULL << 61)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) #define LBR_INFO_CYCLES		0xffff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) /* Refer to the kernel's intel_pmu_store_pebs_lbrs() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) static u64 intel_pt_lbr_flags(u64 info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 	union {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 		struct branch_flags flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 		u64 result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 	} u;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 	u.result	  = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 	u.flags.mispred	  = !!(info & LBR_INFO_MISPRED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 	u.flags.predicted = !(info & LBR_INFO_MISPRED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 	u.flags.in_tx	  = !!(info & LBR_INFO_IN_TX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 	u.flags.abort	  = !!(info & LBR_INFO_ABORT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 	u.flags.cycles	  = info & LBR_INFO_CYCLES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 	return u.result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 
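^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     ) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     )  * Each LBR occupies 3 consecutive block items (from, to, info), so the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     )  * mask is consumed 3 bits at a time and an entry is copied out only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     )  * when all 3 items are present.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     )  */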
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) static void intel_pt_add_lbrs(struct branch_stack *br_stack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 			      const struct intel_pt_blk_items *items)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 	u64 *to;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 	br_stack->nr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 	to = &br_stack->entries[0].from;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 	for (i = INTEL_PT_LBR_0_POS; i <= INTEL_PT_LBR_2_POS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 		u32 mask = items->mask[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 		const u64 *from = items->val[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 		for (; mask; mask >>= 3, from += 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 			if ((mask & 7) == 7) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 				*to++ = from[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 				*to++ = from[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 				*to++ = intel_pt_lbr_flags(from[2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 				br_stack->nr += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 
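^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     ) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     )  * Synthesize a sample from a PEBS record conveyed as Intel PT Block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     )  * items, populating only the fields that the evsel's sample_type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     )  * requests and the record actually provides.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     )  */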
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) static int intel_pt_synth_pebs_sample(struct intel_pt_queue *ptq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 	const struct intel_pt_blk_items *items = &ptq->state->items;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 	struct perf_sample sample = { .ip = 0, };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 	union perf_event *event = ptq->event_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 	struct intel_pt *pt = ptq->pt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 	struct evsel *evsel = pt->pebs_evsel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 	u64 sample_type = evsel->core.attr.sample_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 	u64 id = evsel->core.id[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 	u8 cpumode;
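^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     ) 	/* Room for one u64 per possible bit of intr_regs.mask */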
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 	u64 regs[8 * sizeof(sample.intr_regs.mask)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 	if (intel_pt_skip_event(pt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 	intel_pt_prep_a_sample(ptq, event, &sample);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 	sample.id = id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 	sample.stream_id = id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 	if (!evsel->core.attr.freq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 		sample.period = evsel->core.attr.sample_period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 	/* No support for non-zero CS base */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 	if (items->has_ip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 		sample.ip = items->ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 	else if (items->has_rip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 		sample.ip = items->rip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 		sample.ip = ptq->state->from_ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 	/* No support for guest mode at this time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 	cpumode = sample.ip < ptq->pt->kernel_start ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 		  PERF_RECORD_MISC_USER :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 		  PERF_RECORD_MISC_KERNEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 	event->sample.header.misc = cpumode | PERF_RECORD_MISC_EXACT_IP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 	sample.cpumode = cpumode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 	if (sample_type & PERF_SAMPLE_TIME) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 		u64 timestamp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 		if (items->has_timestamp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 			timestamp = items->timestamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 		else if (!pt->timeless_decoding)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 			timestamp = ptq->timestamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 		if (timestamp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 			sample.time = tsc_to_perf_time(timestamp, &pt->tc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 	if (sample_type & PERF_SAMPLE_CALLCHAIN &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 	    pt->synth_opts.callchain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 		thread_stack__sample(ptq->thread, ptq->cpu, ptq->chain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 				     pt->synth_opts.callchain_sz, sample.ip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 				     pt->kernel_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 		sample.callchain = ptq->chain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 	if (sample_type & PERF_SAMPLE_REGS_INTR &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 	    (items->mask[INTEL_PT_GP_REGS_POS] ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 	     items->mask[INTEL_PT_XMM_POS])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 		u64 regs_mask = evsel->core.attr.sample_regs_intr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 		u64 *pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 		sample.intr_regs.abi = items->is_32_bit ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 				       PERF_SAMPLE_REGS_ABI_32 :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 				       PERF_SAMPLE_REGS_ABI_64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 		sample.intr_regs.regs = regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 		pos = intel_pt_add_gp_regs(&sample.intr_regs, regs, items, regs_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 		intel_pt_add_xmm(&sample.intr_regs, pos, items, regs_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 	if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 		if (items->mask[INTEL_PT_LBR_0_POS] ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 		    items->mask[INTEL_PT_LBR_1_POS] ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 		    items->mask[INTEL_PT_LBR_2_POS]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 			intel_pt_add_lbrs(ptq->last_branch, items);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 		} else if (pt->synth_opts.last_branch) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 			thread_stack__br_sample(ptq->thread, ptq->cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 						ptq->last_branch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 						pt->br_stack_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 			ptq->last_branch->nr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 		sample.branch_stack = ptq->last_branch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 	if (sample_type & PERF_SAMPLE_ADDR && items->has_mem_access_address)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 		sample.addr = items->mem_access_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 	if (sample_type & PERF_SAMPLE_WEIGHT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 		 * Refer to the kernel's setup_pebs_adaptive_sample_data() and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 		 * intel_hsw_weight().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 		if (items->has_mem_access_latency)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 			sample.weight = items->mem_access_latency;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 		if (!sample.weight && items->has_tsx_aux_info) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 			/* Cycle count of the last block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 			sample.weight = (u32)items->tsx_aux_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 	if (sample_type & PERF_SAMPLE_TRANSACTION && items->has_tsx_aux_info) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 		u64 ax = items->has_rax ? items->rax : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 		/* Refer to the kernel's intel_hsw_transaction() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 		u64 txn = (u8)(items->tsx_aux_info >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 		/* For RTM XABORTs also log the abort code from AX */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 		if (txn & PERF_TXN_TRANSACTION && ax & 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 			txn |= ((ax >> 24) & 0xff) << PERF_TXN_ABORT_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 		sample.transaction = txn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 	return intel_pt_deliver_synth_event(pt, event, &sample, sample_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 
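^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     ) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     )  * Synthesize an AUXTRACE_ERROR event for a decoder error. Overflow and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     )  * trace-data-lost errors can be suppressed via error_minus_flags.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     )  */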
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) static int intel_pt_synth_error(struct intel_pt *pt, int code, int cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 				pid_t pid, pid_t tid, u64 ip, u64 timestamp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 	union perf_event event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 	char msg[MAX_AUXTRACE_ERROR_MSG];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 	if (pt->synth_opts.error_minus_flags) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 		if (code == INTEL_PT_ERR_OVR &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 		    pt->synth_opts.error_minus_flags & AUXTRACE_ERR_FLG_OVERFLOW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 		if (code == INTEL_PT_ERR_LOST &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 		    pt->synth_opts.error_minus_flags & AUXTRACE_ERR_FLG_DATA_LOST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 	intel_pt__strerror(code, msg, MAX_AUXTRACE_ERROR_MSG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 	auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 			     code, cpu, pid, tid, ip, msg, timestamp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 	err = perf_session__deliver_synth_event(pt->session, &event, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 		pr_err("Intel Processor Trace: failed to deliver error event, error %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 		       err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) static int intel_ptq_synth_error(struct intel_pt_queue *ptq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 				 const struct intel_pt_state *state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 	struct intel_pt *pt = ptq->pt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 	u64 tm = ptq->timestamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 	tm = pt->timeless_decoding ? 0 : tsc_to_perf_time(tm, &pt->tc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 	return intel_pt_synth_error(pt, state->err, ptq->cpu, ptq->pid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 				    ptq->tid, state->from_ip, tm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 
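^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     ) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     )  * Apply a deferred context switch: make next_tid current on this cpu
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     )  * and refresh the queue's pid/tid/thread to match.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     )  */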
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) static int intel_pt_next_tid(struct intel_pt *pt, struct intel_pt_queue *ptq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 	struct auxtrace_queue *queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 	pid_t tid = ptq->next_tid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 	if (tid == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 	intel_pt_log("switch: cpu %d tid %d\n", ptq->cpu, tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 	err = machine__set_current_tid(pt->machine, ptq->cpu, -1, tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 	queue = &pt->queues.queue_array[ptq->queue_nr];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 	intel_pt_set_pid_tid_cpu(pt, queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 	ptq->next_tid = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) static inline bool intel_pt_is_switch_ip(struct intel_pt_queue *ptq, u64 ip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 	struct intel_pt *pt = ptq->pt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 	return ip == pt->switch_ip &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 	       (ptq->flags & PERF_IP_FLAG_BRANCH) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 	       !(ptq->flags & (PERF_IP_FLAG_CONDITIONAL | PERF_IP_FLAG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 			       PERF_IP_FLAG_INTERRUPT | PERF_IP_FLAG_TX_ABORT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) #define INTEL_PT_PWR_EVT (INTEL_PT_MWAIT_OP | INTEL_PT_PWR_ENTRY | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 			  INTEL_PT_EX_STOP | INTEL_PT_PWR_EXIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 
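^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     ) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     )  * Synthesize whatever samples are due for the current decoder state:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     )  * PEBS first, then power events, instructions, transactions and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     )  * ptwrites, followed by branch bookkeeping and, with sync_switch, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     )  * context-switch state machine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     )  */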
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) static int intel_pt_sample(struct intel_pt_queue *ptq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 	const struct intel_pt_state *state = ptq->state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 	struct intel_pt *pt = ptq->pt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 	if (!ptq->have_sample)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 	ptq->have_sample = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 	ptq->ipc_insn_cnt = ptq->state->tot_insn_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 	ptq->ipc_cyc_cnt = ptq->state->tot_cyc_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 	 * Do PEBS first to allow for the possibility that the PEBS timestamp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 	 * precedes the current timestamp.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 	if (pt->sample_pebs && state->type & INTEL_PT_BLK_ITEMS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 		err = intel_pt_synth_pebs_sample(ptq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 	if (pt->sample_pwr_events) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 		if (ptq->state->cbr != ptq->cbr_seen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 			err = intel_pt_synth_cbr_sample(ptq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 			if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 				return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 		if (state->type & INTEL_PT_PWR_EVT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 			if (state->type & INTEL_PT_MWAIT_OP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 				err = intel_pt_synth_mwait_sample(ptq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 				if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 					return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 			if (state->type & INTEL_PT_PWR_ENTRY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 				err = intel_pt_synth_pwre_sample(ptq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 				if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 					return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 			if (state->type & INTEL_PT_EX_STOP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 				err = intel_pt_synth_exstop_sample(ptq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 				if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 					return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 			if (state->type & INTEL_PT_PWR_EXIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 				err = intel_pt_synth_pwrx_sample(ptq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 				if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 					return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 	if (pt->sample_instructions && (state->type & INTEL_PT_INSTRUCTION)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 		err = intel_pt_synth_instruction_sample(ptq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 	if (pt->sample_transactions && (state->type & INTEL_PT_TRANSACTION)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 		err = intel_pt_synth_transaction_sample(ptq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 	if (pt->sample_ptwrites && (state->type & INTEL_PT_PTW)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 		err = intel_pt_synth_ptwrite_sample(ptq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 	if (!(state->type & INTEL_PT_BRANCH))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 	if (pt->use_thread_stack) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 		thread_stack__event(ptq->thread, ptq->cpu, ptq->flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 				    state->from_ip, state->to_ip, ptq->insn_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 				    state->trace_nr, pt->callstack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 				    pt->br_stack_sz_plus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 				    pt->mispred_all);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 		thread_stack__set_trace_nr(ptq->thread, ptq->cpu, state->trace_nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 	if (pt->sample_branches) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 		err = intel_pt_synth_branch_sample(ptq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 	if (!ptq->sync_switch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 	if (intel_pt_is_switch_ip(ptq, state->to_ip)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 		switch (ptq->switch_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 		case INTEL_PT_SS_NOT_TRACING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 		case INTEL_PT_SS_UNKNOWN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 		case INTEL_PT_SS_EXPECTING_SWITCH_IP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 			err = intel_pt_next_tid(pt, ptq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 			if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 				return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 			ptq->switch_state = INTEL_PT_SS_TRACING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 			ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_EVENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 			return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 	} else if (!state->to_ip) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 		ptq->switch_state = INTEL_PT_SS_NOT_TRACING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 	} else if (ptq->switch_state == INTEL_PT_SS_NOT_TRACING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 		ptq->switch_state = INTEL_PT_SS_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 	} else if (ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 		   state->to_ip == pt->ptss_ip &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 		   (ptq->flags & PERF_IP_FLAG_CALL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 		ptq->switch_state = INTEL_PT_SS_TRACING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 
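^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     ) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     )  * Find the address of the kernel's __switch_to symbol, used to spot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     )  * context switches in the trace, and of the function (ptss_ip) from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     )  * which the sched_switch event is emitted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     )  */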
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) static u64 intel_pt_switch_ip(struct intel_pt *pt, u64 *ptss_ip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 	struct machine *machine = pt->machine;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 	struct map *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 	struct symbol *sym, *start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 	u64 ip, switch_ip = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 	const char *ptss;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 	if (ptss_ip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 		*ptss_ip = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 	map = machine__kernel_map(machine);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 	if (!map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 	if (map__load(map))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 	start = dso__first_symbol(map->dso);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 	for (sym = start; sym; sym = dso__next_symbol(sym)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 		if (sym->binding == STB_GLOBAL &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 		    !strcmp(sym->name, "__switch_to")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 			ip = map->unmap_ip(map, sym->start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 			if (ip >= map->start && ip < map->end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 				switch_ip = ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 	if (!switch_ip || !ptss_ip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 	if (pt->have_sched_switch == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 		ptss = "perf_trace_sched_switch";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 		ptss = "__perf_event_task_sched_out";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 	for (sym = start; sym; sym = dso__next_symbol(sym)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 		if (!strcmp(sym->name, ptss)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 			ip = map->unmap_ip(map, sym->start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 			if (ip >= map->start && ip < map->end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 				*ptss_ip = ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 	return switch_ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) static void intel_pt_enable_sync_switch(struct intel_pt *pt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 	pt->sync_switch = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) 	for (i = 0; i < pt->queues.nr_queues; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) 		struct auxtrace_queue *queue = &pt->queues.queue_array[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 		struct intel_pt_queue *ptq = queue->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 		if (ptq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 			ptq->sync_switch = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165)  * To filter against time ranges, it is only necessary to look at the next start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166)  * or end time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) static bool intel_pt_next_time(struct intel_pt_queue *ptq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 	struct intel_pt *pt = ptq->pt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 	if (ptq->sel_start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 		/* Next time is an end time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 		ptq->sel_start = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 		ptq->sel_timestamp = pt->time_ranges[ptq->sel_idx].end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 	} else if (ptq->sel_idx + 1 < pt->range_cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 		/* Next time is a start time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 		ptq->sel_start = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 		ptq->sel_idx += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 		ptq->sel_timestamp = pt->time_ranges[ptq->sel_idx].start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 	/* No next time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 
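^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     ) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     )  * Enforce the selected time ranges: fast forward the decoder to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     )  * next start time, or return 1 to stop decoding after the last range's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     )  * end time has passed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     )  */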
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) static int intel_pt_time_filter(struct intel_pt_queue *ptq, u64 *ff_timestamp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 	while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 		if (ptq->sel_start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 			if (ptq->timestamp >= ptq->sel_timestamp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 				/* After start time, so consider next time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 				intel_pt_next_time(ptq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 				if (!ptq->sel_timestamp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 					/* No end time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 					return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 				/* Check against end time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 			/* Before start time, so fast forward */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 			ptq->have_sample = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 			if (ptq->sel_timestamp > *ff_timestamp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 				if (ptq->sync_switch) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 					intel_pt_next_tid(ptq->pt, ptq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 					ptq->switch_state = INTEL_PT_SS_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 				*ff_timestamp = ptq->sel_timestamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 				err = intel_pt_fast_forward(ptq->decoder,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 							    ptq->sel_timestamp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 				if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 					return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 		} else if (ptq->timestamp > ptq->sel_timestamp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 			/* After end time, so consider next time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 			if (!intel_pt_next_time(ptq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 				/* No next time range, so stop decoding */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 				ptq->have_sample = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 				ptq->switch_state = INTEL_PT_SS_NOT_TRACING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) 				return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) 			/* Check against next start time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) 			/* Before end time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 
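^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     ) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     )  * Decode and synthesize samples until the queue's timestamp reaches
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     )  * *timestamp (return 0), the trace data runs out (return 1), or an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     )  * error occurs (negative return value).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     )  */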
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 	const struct intel_pt_state *state = ptq->state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 	struct intel_pt *pt = ptq->pt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 	u64 ff_timestamp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 	if (!pt->kernel_start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 		pt->kernel_start = machine__kernel_start(pt->machine);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 		if (pt->per_cpu_mmaps &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 		    (pt->have_sched_switch == 1 || pt->have_sched_switch == 3) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 		    !pt->timeless_decoding && intel_pt_tracing_kernel(pt) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 		    !pt->sampling_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 			pt->switch_ip = intel_pt_switch_ip(pt, &pt->ptss_ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 			if (pt->switch_ip) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 				intel_pt_log("switch_ip: %"PRIx64" ptss_ip: %"PRIx64"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 					     pt->switch_ip, pt->ptss_ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 				intel_pt_enable_sync_switch(pt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) 	intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 		     ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) 	while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 		err = intel_pt_sample(ptq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) 		state = intel_pt_decode(ptq->decoder);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) 		if (state->err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) 			if (state->err == INTEL_PT_ERR_NODATA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 				return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) 			if (ptq->sync_switch &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 			    state->from_ip >= pt->kernel_start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) 				ptq->sync_switch = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 				intel_pt_next_tid(pt, ptq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 			ptq->timestamp = state->est_timestamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 			if (pt->synth_opts.errors) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 				err = intel_ptq_synth_error(ptq, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 				if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 					return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 		ptq->state = state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 		ptq->have_sample = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 		intel_pt_sample_flags(ptq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 		/* Use estimated TSC upon return to user space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 		if (pt->est_tsc &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 		    (state->from_ip >= pt->kernel_start || !state->from_ip) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 		    state->to_ip && state->to_ip < pt->kernel_start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 			intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 				     state->timestamp, state->est_timestamp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 			ptq->timestamp = state->est_timestamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 		/* Use estimated TSC in unknown switch state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 		} else if (ptq->sync_switch &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 			   ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) 			   intel_pt_is_switch_ip(ptq, state->to_ip) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 			   ptq->next_tid == -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 			intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 				     state->timestamp, state->est_timestamp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) 			ptq->timestamp = state->est_timestamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) 		} else if (state->timestamp > ptq->timestamp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 			ptq->timestamp = state->timestamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) 		if (ptq->sel_timestamp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) 			err = intel_pt_time_filter(ptq, &ff_timestamp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) 			if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) 				return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 		if (!pt->timeless_decoding && ptq->timestamp >= *timestamp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 			*timestamp = ptq->timestamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) static inline int intel_pt_update_queues(struct intel_pt *pt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) 	if (pt->queues.new_data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) 		pt->queues.new_data = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) 		return intel_pt_setup_queues(pt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 
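^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     ) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     )  * Process queues in timestamp order, using a heap keyed by each
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     )  * queue's next timestamp, until all of them have advanced past
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     )  * 'timestamp'.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     )  */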
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) static int intel_pt_process_queues(struct intel_pt *pt, u64 timestamp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) 	unsigned int queue_nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 	u64 ts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 	while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) 		struct auxtrace_queue *queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 		struct intel_pt_queue *ptq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 		if (!pt->heap.heap_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) 		if (pt->heap.heap_array[0].ordinal >= timestamp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) 		queue_nr = pt->heap.heap_array[0].queue_nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) 		queue = &pt->queues.queue_array[queue_nr];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) 		ptq = queue->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) 		intel_pt_log("queue %u processing 0x%" PRIx64 " to 0x%" PRIx64 "\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) 			     queue_nr, pt->heap.heap_array[0].ordinal,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) 			     timestamp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) 		auxtrace_heap__pop(&pt->heap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) 		if (pt->heap.heap_cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) 			ts = pt->heap.heap_array[0].ordinal + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 			if (ts > timestamp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) 				ts = timestamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 			ts = timestamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) 		intel_pt_set_pid_tid_cpu(pt, queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) 		ret = intel_pt_run_decoder(ptq, &ts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) 		if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) 			auxtrace_heap__add(&pt->heap, queue_nr, ts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) 		if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) 			ret = auxtrace_heap__add(&pt->heap, queue_nr, ts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) 			if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) 				return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) 			ptq->on_heap = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) 
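^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     ) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     )  * Without trace timestamps, decode each matching queue to completion,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     )  * stamping its samples with the time of the triggering event.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     )  */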
static int intel_pt_process_timeless_queues(struct intel_pt *pt, pid_t tid,
					    u64 time_)
{
	struct auxtrace_queues *queues = &pt->queues;
	unsigned int i;
	u64 ts = 0;

	for (i = 0; i < queues->nr_queues; i++) {
		struct auxtrace_queue *queue = &pt->queues.queue_array[i];
		struct intel_pt_queue *ptq = queue->priv;

		if (ptq && (tid == -1 || ptq->tid == tid)) {
			ptq->time = time_;
			intel_pt_set_pid_tid_cpu(pt, queue);
			intel_pt_run_decoder(ptq, &ts);
		}
	}
	return 0;
}

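/*
 * Take a queue's pid/tid/cpu from the sample being processed, and find or
 * create the corresponding thread.
 */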
static void intel_pt_sample_set_pid_tid_cpu(struct intel_pt_queue *ptq,
					    struct auxtrace_queue *queue,
					    struct perf_sample *sample)
{
	struct machine *m = ptq->pt->machine;

	ptq->pid = sample->pid;
	ptq->tid = sample->tid;
	ptq->cpu = queue->cpu;

	intel_pt_log("queue %u cpu %d pid %d tid %d\n",
		     ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid);

	thread__zput(ptq->thread);

	if (ptq->tid == -1)
		return;

	if (ptq->pid == -1) {
		ptq->thread = machine__find_thread(m, -1, ptq->tid);
		if (ptq->thread)
			ptq->pid = ptq->thread->pid_;
		return;
	}

	ptq->thread = machine__findnew_thread(m, ptq->pid, ptq->tid);
}

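/*
 * Decode one AUX sample in timeless mode: find the queue the sample's
 * trace data belongs to, refresh its pid/tid/cpu from the sample, and run
 * the decoder over it.
 */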
static int intel_pt_process_timeless_sample(struct intel_pt *pt,
					    struct perf_sample *sample)
{
	struct auxtrace_queue *queue;
	struct intel_pt_queue *ptq;
	u64 ts = 0;

	queue = auxtrace_queues__sample_queue(&pt->queues, sample, pt->session);
	if (!queue)
		return -EINVAL;

	ptq = queue->priv;
	if (!ptq)
		return 0;

	ptq->stop = false;
	ptq->time = sample->time;
	intel_pt_sample_set_pid_tid_cpu(ptq, queue, sample);
	intel_pt_run_decoder(ptq, &ts);
	return 0;
}

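/* Report trace data loss as a synthesized error event */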
static int intel_pt_lost(struct intel_pt *pt, struct perf_sample *sample)
{
	return intel_pt_synth_error(pt, INTEL_PT_ERR_LOST, sample->cpu,
				    sample->pid, sample->tid, 0, sample->time);
}

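/*
 * Map a cpu number to its queue. Queue numbers normally correspond to cpu
 * numbers, so try that index first, then search downwards and finally
 * upwards through the queue array.
 */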
static struct intel_pt_queue *intel_pt_cpu_to_ptq(struct intel_pt *pt, int cpu)
{
	unsigned i, j;

	if (cpu < 0 || !pt->queues.nr_queues)
		return NULL;

	if ((unsigned)cpu >= pt->queues.nr_queues)
		i = pt->queues.nr_queues - 1;
	else
		i = cpu;

	if (pt->queues.queue_array[i].cpu == cpu)
		return pt->queues.queue_array[i].priv;

	for (j = 0; i > 0; j++) {
		if (pt->queues.queue_array[--i].cpu == cpu)
			return pt->queues.queue_array[i].priv;
	}

	for (; j < pt->queues.nr_queues; j++) {
		if (pt->queues.queue_array[j].cpu == cpu)
			return pt->queues.queue_array[j].priv;
	}

	return NULL;
}

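/*
 * Feed a context switch into the sync_switch state machine. Returns 0 if
 * the switch was absorbed (the decoder will perform it at the switch ip),
 * 1 if the caller should update the current tid itself, or a negative
 * error code.
 */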
static int intel_pt_sync_switch(struct intel_pt *pt, int cpu, pid_t tid,
				u64 timestamp)
{
	struct intel_pt_queue *ptq;
	int err;

	if (!pt->sync_switch)
		return 1;

	ptq = intel_pt_cpu_to_ptq(pt, cpu);
	if (!ptq || !ptq->sync_switch)
		return 1;

	switch (ptq->switch_state) {
	case INTEL_PT_SS_NOT_TRACING:
		break;
	case INTEL_PT_SS_UNKNOWN:
	case INTEL_PT_SS_TRACING:
		ptq->next_tid = tid;
		ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_IP;
		return 0;
	case INTEL_PT_SS_EXPECTING_SWITCH_EVENT:
		if (!ptq->on_heap) {
			ptq->timestamp = perf_time_to_tsc(timestamp,
							  &pt->tc);
			err = auxtrace_heap__add(&pt->heap, ptq->queue_nr,
						 ptq->timestamp);
			if (err)
				return err;
			ptq->on_heap = true;
		}
		ptq->switch_state = INTEL_PT_SS_TRACING;
		break;
	case INTEL_PT_SS_EXPECTING_SWITCH_IP:
		intel_pt_log("ERROR: cpu %d expecting switch ip\n", cpu);
		break;
	default:
		break;
	}

	ptq->next_tid = -1;

	return 1;
}

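/*
 * Handle a sched:sched_switch tracepoint sample: the incoming task is the
 * tracepoint's "next_pid" field.
 */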
static int intel_pt_process_switch(struct intel_pt *pt,
				   struct perf_sample *sample)
{
	struct evsel *evsel;
	pid_t tid;
	int cpu, ret;

	evsel = perf_evlist__id2evsel(pt->session->evlist, sample->id);
	if (evsel != pt->switch_evsel)
		return 0;

	tid = evsel__intval(evsel, sample, "next_pid");
	cpu = sample->cpu;

	intel_pt_log("sched_switch: cpu %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
		     cpu, tid, sample->time, perf_time_to_tsc(sample->time,
		     &pt->tc));

	ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
	if (ret <= 0)
		return ret;

	return machine__set_current_tid(pt->machine, cpu, -1, tid);
}

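/*
 * Handle the "switch in" half of a context switch event, resetting the
 * sync_switch state machine if it was waiting for a switch.
 */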
static int intel_pt_context_switch_in(struct intel_pt *pt,
				      struct perf_sample *sample)
{
	pid_t pid = sample->pid;
	pid_t tid = sample->tid;
	int cpu = sample->cpu;

	if (pt->sync_switch) {
		struct intel_pt_queue *ptq;

		ptq = intel_pt_cpu_to_ptq(pt, cpu);
		if (ptq && ptq->sync_switch) {
			ptq->next_tid = -1;
			switch (ptq->switch_state) {
			case INTEL_PT_SS_NOT_TRACING:
			case INTEL_PT_SS_UNKNOWN:
			case INTEL_PT_SS_TRACING:
				break;
			case INTEL_PT_SS_EXPECTING_SWITCH_EVENT:
			case INTEL_PT_SS_EXPECTING_SWITCH_IP:
				ptq->switch_state = INTEL_PT_SS_TRACING;
				break;
			default:
				break;
			}
		}
	}

	/*
	 * If the current tid has not been updated yet, ensure it is updated
	 * now that a "switch in" event has occurred.
	 */
	if (machine__get_current_tid(pt->machine, cpu) == tid)
		return 0;

	return machine__set_current_tid(pt->machine, cpu, pid, tid);
}

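/*
 * Handle PERF_RECORD_SWITCH / PERF_RECORD_SWITCH_CPU_WIDE. With CPU-wide
 * switch events (have_sched_switch == 3), the incoming task is taken from
 * the "switch out" event's next_prev_pid/tid; otherwise "switch out" is
 * ignored and the "switch in" sample's pid/tid are used.
 */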
static int intel_pt_context_switch(struct intel_pt *pt, union perf_event *event,
				   struct perf_sample *sample)
{
	bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
	pid_t pid, tid;
	int cpu, ret;

	cpu = sample->cpu;

	if (pt->have_sched_switch == 3) {
		if (!out)
			return intel_pt_context_switch_in(pt, sample);
		if (event->header.type != PERF_RECORD_SWITCH_CPU_WIDE) {
			pr_err("Expecting CPU-wide context switch event\n");
			return -EINVAL;
		}
		pid = event->context_switch.next_prev_pid;
		tid = event->context_switch.next_prev_tid;
	} else {
		if (out)
			return 0;
		pid = sample->pid;
		tid = sample->tid;
	}

	if (tid == -1)
		intel_pt_log("context_switch event has no tid\n");

	ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
	if (ret <= 0)
		return ret;

	return machine__set_current_tid(pt->machine, cpu, pid, tid);
}

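/*
 * PERF_RECORD_ITRACE_START marks where tracing of a task begins, so
 * record the task as current for the cpu. Only relevant to per-cpu mmaps.
 */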
static int intel_pt_process_itrace_start(struct intel_pt *pt,
					 union perf_event *event,
					 struct perf_sample *sample)
{
	if (!pt->per_cpu_mmaps)
		return 0;

	intel_pt_log("itrace_start: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
		     sample->cpu, event->itrace_start.pid,
		     event->itrace_start.tid, sample->time,
		     perf_time_to_tsc(sample->time, &pt->tc));

	return machine__set_current_tid(pt->machine, sample->cpu,
					event->itrace_start.pid,
					event->itrace_start.tid);
}

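/*
 * Find the map containing @addr, reusing the previous lookup result in
 * @al when the address still falls within it.
 */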
static int intel_pt_find_map(struct thread *thread, u8 cpumode, u64 addr,
			     struct addr_location *al)
{
	if (!al->map || addr < al->map->start || addr >= al->map->end) {
		if (!thread__find_map(thread, cpumode, addr, al))
			return -1;
	}

	return 0;
}

/* Invalidate all instruction cache entries that overlap the text poke */
static int intel_pt_text_poke(struct intel_pt *pt, union perf_event *event)
{
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	u64 addr = event->text_poke.addr + event->text_poke.new_len - 1;
	/* Assume the text poke begins in a basic block of no more than 4096 bytes */
	int cnt = 4096 + event->text_poke.new_len;
	struct thread *thread = pt->unknown_thread;
	struct addr_location al = { .map = NULL };
	struct machine *machine = pt->machine;
	struct intel_pt_cache_entry *e;
	u64 offset;

	if (!event->text_poke.new_len)
		return 0;

	for (; cnt; cnt--, addr--) {
		if (intel_pt_find_map(thread, cpumode, addr, &al)) {
			if (addr < event->text_poke.addr)
				return 0;
			continue;
		}

		if (!al.map->dso || !al.map->dso->auxtrace_cache)
			continue;

		offset = al.map->map_ip(al.map, addr);

		e = intel_pt_cache_lookup(al.map->dso, machine, offset);
		if (!e)
			continue;

		if (addr + e->byte_cnt + e->length <= event->text_poke.addr) {
			/*
			 * No overlap. Working backwards, there cannot be
			 * another basic block that overlaps the text poke if
			 * there is a branch instruction before the text poke
			 * address.
			 */
			if (e->branch != INTEL_PT_BR_NO_BRANCH)
				return 0;
		} else {
			intel_pt_cache_invalidate(al.map->dso, machine, offset);
			intel_pt_log("Invalidated instruction cache for %s at %#"PRIx64"\n",
				     al.map->dso->long_name, addr);
		}
	}

	return 0;
}

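/*
 * Main event callback. Trace data queued up to the event's timestamp is
 * decoded first, so that decoding stays in step with the ordered event
 * stream; then side-band events (exit, context switches, itrace start,
 * truncated AUX records, text pokes) are used to update decoder state.
 */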
static int intel_pt_process_event(struct perf_session *session,
				  union perf_event *event,
				  struct perf_sample *sample,
				  struct perf_tool *tool)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);
	u64 timestamp;
	int err = 0;

	if (dump_trace)
		return 0;

	if (!tool->ordered_events) {
		pr_err("Intel Processor Trace requires ordered events\n");
		return -EINVAL;
	}

	if (sample->time && sample->time != (u64)-1)
		timestamp = perf_time_to_tsc(sample->time, &pt->tc);
	else
		timestamp = 0;

	if (timestamp || pt->timeless_decoding) {
		err = intel_pt_update_queues(pt);
		if (err)
			return err;
	}

	if (pt->timeless_decoding) {
		if (pt->sampling_mode) {
			if (sample->aux_sample.size)
				err = intel_pt_process_timeless_sample(pt,
								       sample);
		} else if (event->header.type == PERF_RECORD_EXIT) {
			err = intel_pt_process_timeless_queues(pt,
							       event->fork.tid,
							       sample->time);
		}
	} else if (timestamp) {
		err = intel_pt_process_queues(pt, timestamp);
	}
	if (err)
		return err;

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (pt->synth_opts.add_callchain && !sample->callchain)
			intel_pt_add_callchain(pt, sample);
		if (pt->synth_opts.add_last_branch && !sample->branch_stack)
			intel_pt_add_br_stack(pt, sample);
	}

	if (event->header.type == PERF_RECORD_AUX &&
	    (event->aux.flags & PERF_AUX_FLAG_TRUNCATED) &&
	    pt->synth_opts.errors) {
		err = intel_pt_lost(pt, sample);
		if (err)
			return err;
	}

	if (pt->switch_evsel && event->header.type == PERF_RECORD_SAMPLE)
		err = intel_pt_process_switch(pt, sample);
	else if (event->header.type == PERF_RECORD_ITRACE_START)
		err = intel_pt_process_itrace_start(pt, event, sample);
	else if (event->header.type == PERF_RECORD_SWITCH ||
		 event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)
		err = intel_pt_context_switch(pt, event, sample);

	if (!err && event->header.type == PERF_RECORD_TEXT_POKE)
		err = intel_pt_text_poke(pt, event);

	if (intel_pt_enable_logging && intel_pt_log_events(pt, sample->time)) {
		intel_pt_log("event %u: cpu %d time %"PRIu64" tsc %#"PRIx64" ",
			     event->header.type, sample->cpu, sample->time, timestamp);
		intel_pt_log_event(event);
	}

	return err;
}

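/* Flush at end of session: decode whatever trace data is still queued */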
static int intel_pt_flush(struct perf_session *session, struct perf_tool *tool)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);
	int ret;

	if (dump_trace)
		return 0;

	if (!tool->ordered_events)
		return -EINVAL;

	ret = intel_pt_update_queues(pt);
	if (ret < 0)
		return ret;

	if (pt->timeless_decoding)
		return intel_pt_process_timeless_queues(pt, -1,
							MAX_TIMESTAMP - 1);

	return intel_pt_process_queues(pt, MAX_TIMESTAMP);
}

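/* Free each queue's decoder state, then the queues themselves */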
static void intel_pt_free_events(struct perf_session *session)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);
	struct auxtrace_queues *queues = &pt->queues;
	unsigned int i;

	for (i = 0; i < queues->nr_queues; i++) {
		intel_pt_free_queue(queues->queue_array[i].priv);
		queues->queue_array[i].priv = NULL;
	}
	intel_pt_log_disable();
	auxtrace_queues__free(queues);
}

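/* Tear down everything owned by the struct intel_pt, then the struct itself */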
static void intel_pt_free(struct perf_session *session)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);

	auxtrace_heap__free(&pt->heap);
	intel_pt_free_events(session);
	session->auxtrace = NULL;
	thread__put(pt->unknown_thread);
	addr_filters__exit(&pt->filts);
	zfree(&pt->chain);
	zfree(&pt->filter);
	zfree(&pt->time_ranges);
	free(pt);
}

static bool intel_pt_evsel_is_auxtrace(struct perf_session *session,
				       struct evsel *evsel)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);

	return evsel->core.attr.type == pt->pmu_type;
}

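/*
 * Queue each AUX area buffer as it is encountered. For file input only
 * the buffer's file offset is recorded here; the data itself is mapped
 * when the queue is decoded. Piped data, by contrast, must be copied.
 */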
static int intel_pt_process_auxtrace_event(struct perf_session *session,
					   union perf_event *event,
					   struct perf_tool *tool __maybe_unused)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);

	if (!pt->data_queued) {
		struct auxtrace_buffer *buffer;
		off_t data_offset;
		int fd = perf_data__fd(session->data);
		int err;

		if (perf_data__is_pipe(session->data)) {
			data_offset = 0;
		} else {
			data_offset = lseek(fd, 0, SEEK_CUR);
			if (data_offset == -1)
				return -errno;
		}

		err = auxtrace_queues__add_event(&pt->queues, session, event,
						 data_offset, &buffer);
		if (err)
			return err;

		/* Dump here now that we have copied a piped trace out of the pipe */
		if (dump_trace) {
			if (auxtrace_buffer__get_data(buffer, fd)) {
				intel_pt_dump_event(pt, buffer->data,
						    buffer->size);
				auxtrace_buffer__put_data(buffer);
			}
		}
	}

	return 0;
}

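/*
 * Queue trace data for decoding: either a whole AUX area buffer (@event)
 * or an individual AUX sample, ordered by its TSC timestamp.
 */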
static int intel_pt_queue_data(struct perf_session *session,
			       struct perf_sample *sample,
			       union perf_event *event, u64 data_offset)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);
	u64 timestamp;

	if (event) {
		return auxtrace_queues__add_event(&pt->queues, session, event,
						  data_offset, NULL);
	}

	if (sample->time && sample->time != (u64)-1)
		timestamp = perf_time_to_tsc(sample->time, &pt->tc);
	else
		timestamp = 0;

	return auxtrace_queues__add_sample(&pt->queues, session, sample,
					   data_offset, timestamp);
}

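/* Dummy tool used to deliver synthesized attribute events to the session */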
struct intel_pt_synth {
	struct perf_tool dummy_tool;
	struct perf_session *session;
};

static int intel_pt_event_synth(struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample __maybe_unused,
				struct machine *machine __maybe_unused)
{
	struct intel_pt_synth *intel_pt_synth =
			container_of(tool, struct intel_pt_synth, dummy_tool);

	return perf_session__deliver_synth_event(intel_pt_synth->session, event,
						 NULL);
}

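/*
 * Synthesize an attribute event describing one kind of sample that the
 * decoder will synthesize, so that tools processing the session know how
 * to parse those samples.
 */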
static int intel_pt_synth_event(struct perf_session *session, const char *name,
				struct perf_event_attr *attr, u64 id)
{
	struct intel_pt_synth intel_pt_synth;
	int err;

	pr_debug("Synthesizing '%s' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
		 name, id, (u64)attr->sample_type);

	memset(&intel_pt_synth, 0, sizeof(struct intel_pt_synth));
	intel_pt_synth.session = session;

	err = perf_event__synthesize_attr(&intel_pt_synth.dummy_tool, attr, 1,
					  &id, intel_pt_event_synth);
	if (err)
		pr_err("%s: failed to synthesize '%s' event type\n",
		       __func__, name);

	return err;
}

static void intel_pt_set_event_name(struct evlist *evlist, u64 id,
				    const char *name)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.id && evsel->core.id[0] == id) {
			if (evsel->name)
				zfree(&evsel->name);
			evsel->name = strdup(name);
			break;
		}
	}
}

static struct evsel *intel_pt_evsel(struct intel_pt *pt,
					 struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type == pt->pmu_type && evsel->core.ids)
			return evsel;
	}

	return NULL;
}

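/*
 * Build perf_event_attrs for the sample types to be synthesized
 * (branches, instructions, transactions, ptwrites, power events), derived
 * from the Intel PT evsel's attributes, and allocate sample ids for them.
 */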
static int intel_pt_synth_events(struct intel_pt *pt,
				 struct perf_session *session)
{
	struct evlist *evlist = session->evlist;
	struct evsel *evsel = intel_pt_evsel(pt, evlist);
	struct perf_event_attr attr;
	u64 id;
	int err;

	if (!evsel) {
		pr_debug("There are no selected events with Intel Processor Trace data\n");
		return 0;
	}

	memset(&attr, 0, sizeof(struct perf_event_attr));
	attr.size = sizeof(struct perf_event_attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.sample_type = evsel->core.attr.sample_type & PERF_SAMPLE_MASK;
	attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
			    PERF_SAMPLE_PERIOD;
	if (pt->timeless_decoding)
		attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
	else
		attr.sample_type |= PERF_SAMPLE_TIME;
	if (!pt->per_cpu_mmaps)
		attr.sample_type &= ~(u64)PERF_SAMPLE_CPU;
	attr.exclude_user = evsel->core.attr.exclude_user;
	attr.exclude_kernel = evsel->core.attr.exclude_kernel;
	attr.exclude_hv = evsel->core.attr.exclude_hv;
	attr.exclude_host = evsel->core.attr.exclude_host;
	attr.exclude_guest = evsel->core.attr.exclude_guest;
	attr.sample_id_all = evsel->core.attr.sample_id_all;
	attr.read_format = evsel->core.attr.read_format;

	id = evsel->core.id[0] + 1000000000;
	if (!id)
		id = 1;

	if (pt->synth_opts.branches) {
		attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
		attr.sample_period = 1;
		attr.sample_type |= PERF_SAMPLE_ADDR;
		err = intel_pt_synth_event(session, "branches", &attr, id);
		if (err)
			return err;
		pt->sample_branches = true;
		pt->branches_sample_type = attr.sample_type;
		pt->branches_id = id;
		id += 1;
		attr.sample_type &= ~(u64)PERF_SAMPLE_ADDR;
	}

	if (pt->synth_opts.callchain)
		attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
	if (pt->synth_opts.last_branch) {
		attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
		/*
		 * We don't use the hardware index, but the sample generation
		 * code uses the new format branch_stack with this field,
		 * so the event attributes must indicate that it's present.
		 */
		attr.branch_sample_type |= PERF_SAMPLE_BRANCH_HW_INDEX;
	}

	if (pt->synth_opts.instructions) {
		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
		if (pt->synth_opts.period_type == PERF_ITRACE_PERIOD_NANOSECS)
			attr.sample_period =
				intel_pt_ns_to_ticks(pt, pt->synth_opts.period);
		else
			attr.sample_period = pt->synth_opts.period;
		err = intel_pt_synth_event(session, "instructions", &attr, id);
		if (err)
			return err;
		pt->sample_instructions = true;
		pt->instructions_sample_type = attr.sample_type;
		pt->instructions_id = id;
		id += 1;
	}

	attr.sample_type &= ~(u64)PERF_SAMPLE_PERIOD;
	attr.sample_period = 1;

	if (pt->synth_opts.transactions) {
		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
		err = intel_pt_synth_event(session, "transactions", &attr, id);
		if (err)
			return err;
		pt->sample_transactions = true;
		pt->transactions_sample_type = attr.sample_type;
		pt->transactions_id = id;
		intel_pt_set_event_name(evlist, id, "transactions");
		id += 1;
	}

	attr.type = PERF_TYPE_SYNTH;
	attr.sample_type |= PERF_SAMPLE_RAW;

	if (pt->synth_opts.ptwrites) {
		attr.config = PERF_SYNTH_INTEL_PTWRITE;
		err = intel_pt_synth_event(session, "ptwrite", &attr, id);
		if (err)
			return err;
		pt->sample_ptwrites = true;
		pt->ptwrites_sample_type = attr.sample_type;
		pt->ptwrites_id = id;
		intel_pt_set_event_name(evlist, id, "ptwrite");
		id += 1;
	}

	if (pt->synth_opts.pwr_events) {
		pt->sample_pwr_events = true;
		pt->pwr_events_sample_type = attr.sample_type;

		attr.config = PERF_SYNTH_INTEL_CBR;
		err = intel_pt_synth_event(session, "cbr", &attr, id);
		if (err)
			return err;
		pt->cbr_id = id;
		intel_pt_set_event_name(evlist, id, "cbr");
		id += 1;
	}

	if (pt->synth_opts.pwr_events && (evsel->core.attr.config & 0x10)) {
		attr.config = PERF_SYNTH_INTEL_MWAIT;
		err = intel_pt_synth_event(session, "mwait", &attr, id);
		if (err)
			return err;
		pt->mwait_id = id;
		intel_pt_set_event_name(evlist, id, "mwait");
		id += 1;

		attr.config = PERF_SYNTH_INTEL_PWRE;
		err = intel_pt_synth_event(session, "pwre", &attr, id);
		if (err)
			return err;
		pt->pwre_id = id;
		intel_pt_set_event_name(evlist, id, "pwre");
		id += 1;

		attr.config = PERF_SYNTH_INTEL_EXSTOP;
		err = intel_pt_synth_event(session, "exstop", &attr, id);
		if (err)
			return err;
		pt->exstop_id = id;
		intel_pt_set_event_name(evlist, id, "exstop");
		id += 1;

		attr.config = PERF_SYNTH_INTEL_PWRX;
		err = intel_pt_synth_event(session, "pwrx", &attr, id);
		if (err)
			return err;
		pt->pwrx_id = id;
		intel_pt_set_event_name(evlist, id, "pwrx");
		id += 1;
	}

	return 0;
}

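/*
 * An evsel with aux_output set has its samples (PEBS-via-PT) embedded in
 * the AUX area trace, so they must be synthesized from the trace itself.
 */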
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) static void intel_pt_setup_pebs_events(struct intel_pt *pt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) 	struct evsel *evsel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) 	if (!pt->synth_opts.other_events)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) 	evlist__for_each_entry(pt->session->evlist, evsel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) 		if (evsel->core.attr.aux_output && evsel->core.id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) 			pt->sample_pebs = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) 			pt->pebs_evsel = evsel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) static struct evsel *intel_pt_find_sched_switch(struct evlist *evlist)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) 	struct evsel *evsel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) 	evlist__for_each_entry_reverse(evlist, evsel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) 		const char *name = evsel__name(evsel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) 		if (!strcmp(name, "sched:sched_switch"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) 			return evsel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) static bool intel_pt_find_switch(struct evlist *evlist)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) 	struct evsel *evsel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) 	evlist__for_each_entry(evlist, evsel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) 		if (evsel->core.attr.context_switch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) static int intel_pt_perf_config(const char *var, const char *value, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) 	struct intel_pt *pt = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) 	if (!strcmp(var, "intel-pt.mispred-all"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) 		pt->mispred_all = perf_config_bool(var, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) 
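/*
 * The (time_shift, time_mult, time_zero) conversion is not exactly
 * invertible, so perf_time_to_tsc() followed by tsc_to_perf_time() may
 * not land back on 'ns'. The two helpers below step the initial estimate
 * down/up until the converted value sits exactly on the requested side of
 * 'ns', so that a [start, end] perf-time range maps to a TSC range
 * covering the same trace data.
 */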
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) /* Find least TSC which converts to ns or later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) static u64 intel_pt_tsc_start(u64 ns, struct intel_pt *pt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) 	u64 tsc, tm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) 	tsc = perf_time_to_tsc(ns, &pt->tc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) 	while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) 		tm = tsc_to_perf_time(tsc, &pt->tc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) 		if (tm < ns)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) 		tsc -= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) 	while (tm < ns)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) 		tm = tsc_to_perf_time(++tsc, &pt->tc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) 	return tsc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) /* Find greatest TSC which converts to ns or earlier */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) static u64 intel_pt_tsc_end(u64 ns, struct intel_pt *pt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) 	u64 tsc, tm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) 	tsc = perf_time_to_tsc(ns, &pt->tc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) 	while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) 		tm = tsc_to_perf_time(tsc, &pt->tc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) 		if (tm > ns)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) 		tsc += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) 	while (tm > ns)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) 		tm = tsc_to_perf_time(--tsc, &pt->tc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) 	return tsc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) 
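/*
 * Convert the perf-time intervals from the consuming tool's --time option
 * into TSC intervals up front, so the decoder can compare raw packet
 * timestamps directly. An illustrative invocation (option syntax per the
 * perf-script documentation):
 *
 *   perf script --itrace=i100us --time 10.0,20.0
 *
 * A zero start or end is kept as 0, meaning the range is open on that
 * side.
 */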
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) static int intel_pt_setup_time_ranges(struct intel_pt *pt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) 				      struct itrace_synth_opts *opts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) 	struct perf_time_interval *p = opts->ptime_range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) 	int n = opts->range_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) 	if (!n || !p || pt->timeless_decoding)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) 	pt->time_ranges = calloc(n, sizeof(struct range));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) 	if (!pt->time_ranges)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) 	pt->range_cnt = n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) 	intel_pt_log("%s: %u range(s)\n", __func__, n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) 	for (i = 0; i < n; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) 		struct range *r = &pt->time_ranges[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) 		u64 ts = p[i].start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) 		u64 te = p[i].end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) 		 * Take care to ensure the TSC range matches the perf-time range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) 		 * when converted back to perf-time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) 		r->start = ts ? intel_pt_tsc_start(ts, pt) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) 		r->end   = te ? intel_pt_tsc_end(te, pt) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) 		intel_pt_log("range %d: perf time interval: %"PRIu64" to %"PRIu64"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) 			     i, ts, te);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) 		intel_pt_log("range %d: TSC time interval: %#"PRIx64" to %#"PRIx64"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) 			     i, r->start, r->end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) static const char * const intel_pt_info_fmts[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) 	[INTEL_PT_PMU_TYPE]		= "  PMU Type            %"PRId64"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) 	[INTEL_PT_TIME_SHIFT]		= "  Time Shift          %"PRIu64"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) 	[INTEL_PT_TIME_MULT]		= "  Time Multiplier     %"PRIu64"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) 	[INTEL_PT_TIME_ZERO]		= "  Time Zero           %"PRIu64"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) 	[INTEL_PT_CAP_USER_TIME_ZERO]	= "  Cap Time Zero       %"PRId64"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) 	[INTEL_PT_TSC_BIT]		= "  TSC bit             %#"PRIx64"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) 	[INTEL_PT_NORETCOMP_BIT]	= "  NoRETComp bit       %#"PRIx64"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) 	[INTEL_PT_HAVE_SCHED_SWITCH]	= "  Have sched_switch   %"PRId64"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) 	[INTEL_PT_SNAPSHOT_MODE]	= "  Snapshot mode       %"PRId64"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) 	[INTEL_PT_PER_CPU_MMAPS]	= "  Per-cpu maps        %"PRId64"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) 	[INTEL_PT_MTC_BIT]		= "  MTC bit             %#"PRIx64"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) 	[INTEL_PT_TSC_CTC_N]		= "  TSC:CTC numerator   %"PRIu64"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) 	[INTEL_PT_TSC_CTC_D]		= "  TSC:CTC denominator %"PRIu64"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) 	[INTEL_PT_CYC_BIT]		= "  CYC bit             %#"PRIx64"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) 	[INTEL_PT_MAX_NONTURBO_RATIO]	= "  Max non-turbo ratio %"PRIu64"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) 	[INTEL_PT_FILTER_STR_LEN]	= "  Filter string len.  %"PRIu64"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) static void intel_pt_print_info(__u64 *arr, int start, int finish)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) 	if (!dump_trace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) 	for (i = start; i <= finish; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) 		fprintf(stdout, intel_pt_info_fmts[i], arr[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) static void intel_pt_print_info_str(const char *name, const char *str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) 	if (!dump_trace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) 	fprintf(stdout, "  %-20s%s\n", name, str ? str : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) 
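/*
 * The priv[] array in PERF_RECORD_AUXTRACE_INFO grew over time, so check
 * header.size before indexing newer fields. Worked example: with 8-byte
 * entries, priv[INTEL_PT_CYC_BIT] is only valid if header.size covers the
 * fixed struct plus (INTEL_PT_CYC_BIT + 1) * 8 bytes.
 */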
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) static bool intel_pt_has(struct perf_record_auxtrace_info *auxtrace_info, int pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) 	return auxtrace_info->header.size >=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) 		sizeof(struct perf_record_auxtrace_info) + (sizeof(u64) * (pos + 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) int intel_pt_process_auxtrace_info(union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) 				   struct perf_session *session)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) 	struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) 	size_t min_sz = sizeof(u64) * INTEL_PT_PER_CPU_MMAPS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) 	struct intel_pt *pt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) 	void *info_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) 	__u64 *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) 	if (auxtrace_info->header.size < sizeof(struct perf_record_auxtrace_info) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) 					min_sz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) 	pt = zalloc(sizeof(struct intel_pt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) 	if (!pt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) 	addr_filters__init(&pt->filts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) 	err = perf_config(intel_pt_perf_config, pt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) 		goto err_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) 	err = auxtrace_queues__init(&pt->queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) 		goto err_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) 	intel_pt_log_set_name(INTEL_PT_PMU_NAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) 	pt->session = session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) 	pt->machine = &session->machines.host; /* No kvm support */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) 	pt->auxtrace_type = auxtrace_info->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) 	pt->pmu_type = auxtrace_info->priv[INTEL_PT_PMU_TYPE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) 	pt->tc.time_shift = auxtrace_info->priv[INTEL_PT_TIME_SHIFT];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) 	pt->tc.time_mult = auxtrace_info->priv[INTEL_PT_TIME_MULT];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) 	pt->tc.time_zero = auxtrace_info->priv[INTEL_PT_TIME_ZERO];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) 	pt->cap_user_time_zero = auxtrace_info->priv[INTEL_PT_CAP_USER_TIME_ZERO];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) 	pt->tsc_bit = auxtrace_info->priv[INTEL_PT_TSC_BIT];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) 	pt->noretcomp_bit = auxtrace_info->priv[INTEL_PT_NORETCOMP_BIT];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) 	pt->have_sched_switch = auxtrace_info->priv[INTEL_PT_HAVE_SCHED_SWITCH];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) 	pt->snapshot_mode = auxtrace_info->priv[INTEL_PT_SNAPSHOT_MODE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) 	pt->per_cpu_mmaps = auxtrace_info->priv[INTEL_PT_PER_CPU_MMAPS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) 	intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_PMU_TYPE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) 			    INTEL_PT_PER_CPU_MMAPS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) 	if (intel_pt_has(auxtrace_info, INTEL_PT_CYC_BIT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) 		pt->mtc_bit = auxtrace_info->priv[INTEL_PT_MTC_BIT];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) 		pt->mtc_freq_bits = auxtrace_info->priv[INTEL_PT_MTC_FREQ_BITS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) 		pt->tsc_ctc_ratio_n = auxtrace_info->priv[INTEL_PT_TSC_CTC_N];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) 		pt->tsc_ctc_ratio_d = auxtrace_info->priv[INTEL_PT_TSC_CTC_D];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) 		pt->cyc_bit = auxtrace_info->priv[INTEL_PT_CYC_BIT];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) 		intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_MTC_BIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) 				    INTEL_PT_CYC_BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) 	if (intel_pt_has(auxtrace_info, INTEL_PT_MAX_NONTURBO_RATIO)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) 		pt->max_non_turbo_ratio =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) 			auxtrace_info->priv[INTEL_PT_MAX_NONTURBO_RATIO];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) 		intel_pt_print_info(&auxtrace_info->priv[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) 				    INTEL_PT_MAX_NONTURBO_RATIO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) 				    INTEL_PT_MAX_NONTURBO_RATIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) 
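	/*
	 * 'info' now points just past the last fixed priv[] field; any
	 * variable-length data (currently only the address filter string,
	 * padded to a multiple of 8 bytes) follows, and 'info_end' is used
	 * to bounds-check reads from it.
	 */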
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) 	info = &auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN] + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) 	info_end = (void *)info + auxtrace_info->header.size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) 	if (intel_pt_has(auxtrace_info, INTEL_PT_FILTER_STR_LEN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) 		size_t len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) 		len = auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) 		intel_pt_print_info(&auxtrace_info->priv[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) 				    INTEL_PT_FILTER_STR_LEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) 				    INTEL_PT_FILTER_STR_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) 		if (len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) 			const char *filter = (const char *)info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) 			len = roundup(len + 1, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) 			info += len >> 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) 			if ((void *)info > info_end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) 				pr_err("%s: bad filter string length\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) 				err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) 				goto err_free_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) 			pt->filter = memdup(filter, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) 			if (!pt->filter) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) 				err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) 				goto err_free_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) 			if (session->header.needs_swap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) 				mem_bswap_64(pt->filter, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) 			if (pt->filter[len - 1]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) 				pr_err("%s: filter string not null terminated\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) 				err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) 				goto err_free_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) 			err = addr_filters__parse_bare_filter(&pt->filts,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) 							      filter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) 			if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) 				goto err_free_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) 		intel_pt_print_info_str("Filter string", pt->filter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) 	pt->timeless_decoding = intel_pt_timeless_decoding(pt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) 	if (pt->timeless_decoding && !pt->tc.time_mult)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) 		pt->tc.time_mult = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) 	pt->have_tsc = intel_pt_have_tsc(pt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) 	pt->sampling_mode = intel_pt_sampling_mode(pt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) 	pt->est_tsc = !pt->timeless_decoding;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) 
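	/*
	 * Sentinel thread for samples whose thread cannot be resolved;
	 * 999999999 is simply an implausible pid/tid pair, not a
	 * meaningful value.
	 */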
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) 	pt->unknown_thread = thread__new(999999999, 999999999);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) 	if (!pt->unknown_thread) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) 		err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) 		goto err_free_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) 	 * Since this thread will not be kept in any rbtree nor in a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) 	 * list, initialize its list node so that at thread__put() the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) 	 * current thread lifetime assumption is kept and we don't segfault
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) 	 * at list_del_init().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) 	INIT_LIST_HEAD(&pt->unknown_thread->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) 	err = thread__set_comm(pt->unknown_thread, "unknown", 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) 		goto err_delete_thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) 	if (thread__init_maps(pt->unknown_thread, pt->machine)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) 		err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) 		goto err_delete_thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) 	pt->auxtrace.process_event = intel_pt_process_event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) 	pt->auxtrace.process_auxtrace_event = intel_pt_process_auxtrace_event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) 	pt->auxtrace.queue_data = intel_pt_queue_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) 	pt->auxtrace.dump_auxtrace_sample = intel_pt_dump_sample;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) 	pt->auxtrace.flush_events = intel_pt_flush;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) 	pt->auxtrace.free_events = intel_pt_free_events;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) 	pt->auxtrace.free = intel_pt_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) 	pt->auxtrace.evsel_is_auxtrace = intel_pt_evsel_is_auxtrace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) 	session->auxtrace = &pt->auxtrace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) 	if (dump_trace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) 
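	/*
	 * have_sched_switch records how context switches were captured:
	 * 1 means a sched:sched_switch tracepoint evsel should be present,
	 * 2 means at least one evsel should carry attr.context_switch.
	 * Without either, per-cpu decoding cannot follow thread switches.
	 */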
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) 	if (pt->have_sched_switch == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) 		pt->switch_evsel = intel_pt_find_sched_switch(session->evlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) 		if (!pt->switch_evsel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) 			pr_err("%s: missing sched_switch event\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) 			err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) 			goto err_delete_thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) 	} else if (pt->have_sched_switch == 2 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) 		   !intel_pt_find_switch(session->evlist)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) 		pr_err("%s: missing context_switch attribute flag\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) 		err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) 		goto err_delete_thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) 
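	/*
	 * With no explicit --itrace string, start from the defaults and,
	 * except when injecting, synthesize call chains instead of raw
	 * branch samples.
	 */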
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) 	if (session->itrace_synth_opts->set) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) 		pt->synth_opts = *session->itrace_synth_opts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) 		itrace_synth_opts__set_default(&pt->synth_opts,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) 				session->itrace_synth_opts->default_no_sample);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) 		if (!session->itrace_synth_opts->default_no_sample &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) 		    !session->itrace_synth_opts->inject) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) 			pt->synth_opts.branches = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) 			pt->synth_opts.callchain = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) 			pt->synth_opts.add_callchain = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) 		pt->synth_opts.thread_stack =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) 				session->itrace_synth_opts->thread_stack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) 	if (pt->synth_opts.log)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) 		intel_pt_log_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) 	/* Maximum non-turbo ratio is TSC freq / 100 MHz */
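	/*
	 * Worked example: for a 3.0 GHz TSC,
	 * (3000000000 + 50000000) / 100000000 = 30, i.e. the TSC frequency
	 * in units of 100 MHz, rounded to nearest. cbr2khz then converts
	 * core-to-bus ratio values back to kHz: 3000000000 / 30 / 1000 =
	 * 100000, so a CBR packet value of 30 decodes as 3.0 GHz.
	 */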
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) 	if (pt->tc.time_mult) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) 		u64 tsc_freq = intel_pt_ns_to_ticks(pt, 1000000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) 		if (!pt->max_non_turbo_ratio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) 			pt->max_non_turbo_ratio =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) 					(tsc_freq + 50000000) / 100000000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) 		intel_pt_log("TSC frequency %"PRIu64"\n", tsc_freq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) 		intel_pt_log("Maximum non-turbo ratio %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) 			     pt->max_non_turbo_ratio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) 		pt->cbr2khz = tsc_freq / pt->max_non_turbo_ratio / 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) 	err = intel_pt_setup_time_ranges(pt, session->itrace_synth_opts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) 		goto err_delete_thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) 	if (pt->synth_opts.calls)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) 		pt->branches_filter |= PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) 				       PERF_IP_FLAG_TRACE_END;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) 	if (pt->synth_opts.returns)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) 		pt->branches_filter |= PERF_IP_FLAG_RETURN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) 				       PERF_IP_FLAG_TRACE_BEGIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) 	if ((pt->synth_opts.callchain || pt->synth_opts.add_callchain) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) 	    !symbol_conf.use_callchain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) 		symbol_conf.use_callchain = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) 		if (callchain_register_param(&callchain_param) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) 			symbol_conf.use_callchain = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) 			pt->synth_opts.callchain = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) 			pt->synth_opts.add_callchain = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) 	if (pt->synth_opts.add_callchain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) 		err = intel_pt_callchain_init(pt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) 			goto err_delete_thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) 	if (pt->synth_opts.last_branch || pt->synth_opts.add_last_branch) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) 		pt->br_stack_sz = pt->synth_opts.last_branch_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) 		pt->br_stack_sz_plus = pt->br_stack_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) 	if (pt->synth_opts.add_last_branch) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) 		err = intel_pt_br_stack_init(pt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) 			goto err_delete_thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) 		 * Additional branch stack size to cater for tracing from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) 		 * actual sample ip to where the sample time is recorded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) 		 * Measured at about 200 branches, but generously set to 1024.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) 		 * If kernel space is not being traced, then add just 1 for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) 		 * branch to kernel space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) 		if (intel_pt_tracing_kernel(pt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) 			pt->br_stack_sz_plus += 1024;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) 			pt->br_stack_sz_plus += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) 	pt->use_thread_stack = pt->synth_opts.callchain ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) 			       pt->synth_opts.add_callchain ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) 			       pt->synth_opts.thread_stack ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) 			       pt->synth_opts.last_branch ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) 			       pt->synth_opts.add_last_branch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) 	pt->callstack = pt->synth_opts.callchain ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) 			pt->synth_opts.add_callchain ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) 			pt->synth_opts.thread_stack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) 	err = intel_pt_synth_events(pt, session);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) 		goto err_delete_thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) 	intel_pt_setup_pebs_events(pt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) 
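	/*
	 * Queue up the AUX data: in sampling mode, or when the file has no
	 * auxtrace index (e.g. piped input), copy the data into queues now;
	 * otherwise use the recorded index to locate it.
	 */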
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) 	if (pt->sampling_mode || list_empty(&session->auxtrace_index))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) 		err = auxtrace_queue_data(session, true, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) 		err = auxtrace_queues__process_index(&pt->queues, session);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) 		goto err_delete_thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) 	if (pt->queues.populated)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) 		pt->data_queued = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) 	if (pt->timeless_decoding)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) 		pr_debug2("Intel PT decoding without timestamps\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) 
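/*
 * Error unwind: the labels below run in reverse order of the setup above
 * and fall through, so each goto target releases everything acquired
 * before the point of failure.
 */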
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) err_delete_thread:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) 	zfree(&pt->chain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) 	thread__zput(pt->unknown_thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) err_free_queues:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) 	intel_pt_log_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) 	auxtrace_queues__free(&pt->queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) 	session->auxtrace = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) err_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) 	addr_filters__exit(&pt->filts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) 	zfree(&pt->filter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) 	zfree(&pt->time_ranges);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) 	free(pt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) }