Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * Copyright(C) 2015-2018 Linaro Limited.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  * Author: Tor Jeremiassen <tor@ti.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9) #include <linux/bitops.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10) #include <linux/err.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12) #include <linux/log2.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14) #include <linux/zalloc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16) #include <opencsd/ocsd_if_types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) #include <stdlib.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) #include "auxtrace.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) #include "color.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) #include "cs-etm.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) #include "cs-etm-decoder/cs-etm-decoder.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) #include "debug.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) #include "dso.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) #include "evlist.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) #include "intlist.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) #include "machine.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) #include "map.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) #include "perf.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) #include "session.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31) #include "map_symbol.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32) #include "branch.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33) #include "symbol.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34) #include "tool.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35) #include "thread.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36) #include "thread-stack.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37) #include <tools/libc_compat.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38) #include "util/synthetic-events.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40) #define MAX_TIMESTAMP (~0ULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41) 
/*
 * Session-wide state for decoding CoreSight ETM auxtrace data.
 */
struct cs_etm_auxtrace {
	struct auxtrace auxtrace;
	struct auxtrace_queues queues;
	struct auxtrace_heap heap;
	struct itrace_synth_opts synth_opts;
	struct perf_session *session;
	struct machine *machine;
	struct thread *unknown_thread;

	/* When set, decoding uses one per-thread traceID queue
	 * (see cs_etm__etmq_get_traceid_queue()). */
	u8 timeless_decoding;
	u8 snapshot_mode;
	u8 data_queued;
	u8 sample_branches;		/* synthesize branch samples */
	u8 sample_instructions;		/* synthesize instruction samples */

	int num_cpu;			/* number of entries in metadata[] */
	u32 auxtrace_type;
	u64 branches_sample_type;
	u64 branches_id;
	u64 instructions_sample_type;
	u64 instructions_sample_period;
	u64 instructions_id;
	/* Per-CPU metadata arrays, indexed as metadata[cpu][CS_ETM*_xxx] */
	u64 **metadata;
	u64 kernel_start;
	unsigned int pmu_type;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68) 
/*
 * Decode state for a single trace channel (traceID) within a cs_etm_queue.
 */
struct cs_etm_traceid_queue {
	u8 trace_chan_id;		/* traceID this queue services */
	/* pid stays -1 until known; tid comes from the auxtrace queue
	 * (see cs_etm__init_traceid_queue()). */
	pid_t pid, tid;
	u64 period_instructions;
	size_t last_branch_pos;
	/* Scratch buffer of PERF_SAMPLE_MAX_SIZE for synthesized events */
	union perf_event *event_buf;
	struct thread *thread;
	/* Both allocated only when synth_opts.last_branch is set */
	struct branch_stack *last_branch;
	struct branch_stack *last_branch_rb;
	struct cs_etm_packet *prev_packet;	/* swapped with packet, see cs_etm__packet_swap() */
	struct cs_etm_packet *packet;		/* packet currently being processed */
	struct cs_etm_packet_queue packet_queue;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   82) 
/*
 * Per auxtrace-queue decode context; can carry several traceID queues,
 * one per trace channel seen in the data.
 */
struct cs_etm_queue {
	struct cs_etm_auxtrace *etm;	/* owning session-wide state */
	struct cs_etm_decoder *decoder;
	struct auxtrace_buffer *buffer;
	unsigned int queue_nr;		/* index into etm->queues.queue_array */
	/* trace_chan_id with a timestamp waiting to be serviced, 0 if none */
	u8 pending_timestamp;
	u64 offset;
	const unsigned char *buf;
	size_t buf_len, buf_used;
	/* Conversion between traceID and index in traceid_queues array */
	struct intlist *traceid_queues_list;
	struct cs_etm_traceid_queue **traceid_queues;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   96) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   97) /* RB tree for quick conversion between traceID and metadata pointers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   98) static struct intlist *traceid_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   99) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  100) static int cs_etm__update_queues(struct cs_etm_auxtrace *etm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  101) static int cs_etm__process_queues(struct cs_etm_auxtrace *etm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  102) static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  103) 					   pid_t tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  104) static int cs_etm__get_data_block(struct cs_etm_queue *etmq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  105) static int cs_etm__decode_data_block(struct cs_etm_queue *etmq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  106) 
/* PTMs ETMIDR [11:8] set to b0011 */
#define ETMIDR_PTM_VERSION 0x00000300

/*
 * A struct auxtrace_heap_item only has a queue_nr and a timestamp to
 * work with.  One option is to modify to auxtrace_heap_XYZ() API or simply
 * encode the etm queue number as the upper 16 bit and the channel as
 * the lower 16 bit.
 *
 * Arguments are parenthesized so expressions with operators of lower
 * precedence than '<<'/'>>'/'&' expand correctly.
 */
#define TO_CS_QUEUE_NR(queue_nr, trace_chan_id)	\
		      ((queue_nr) << 16 | (trace_chan_id))
#define TO_QUEUE_NR(cs_queue_nr) ((cs_queue_nr) >> 16)
#define TO_TRACE_CHAN_ID(cs_queue_nr) ((cs_queue_nr) & 0x0000ffff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  121) static u32 cs_etm__get_v7_protocol_version(u32 etmidr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  122) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  123) 	etmidr &= ETMIDR_PTM_VERSION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  125) 	if (etmidr == ETMIDR_PTM_VERSION)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  126) 		return CS_ETM_PROTO_PTM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  128) 	return CS_ETM_PROTO_ETMV3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  131) static int cs_etm__get_magic(u8 trace_chan_id, u64 *magic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  132) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  133) 	struct int_node *inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  134) 	u64 *metadata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  136) 	inode = intlist__find(traceid_list, trace_chan_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  137) 	if (!inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  138) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  140) 	metadata = inode->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  141) 	*magic = metadata[CS_ETM_MAGIC];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  142) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  145) int cs_etm__get_cpu(u8 trace_chan_id, int *cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  146) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  147) 	struct int_node *inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  148) 	u64 *metadata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  150) 	inode = intlist__find(traceid_list, trace_chan_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  151) 	if (!inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  152) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  154) 	metadata = inode->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  155) 	*cpu = (int)metadata[CS_ETM_CPU];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  156) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  158) 
void cs_etm__etmq_set_traceid_queue_timestamp(struct cs_etm_queue *etmq,
					      u8 trace_chan_id)
{
	/*
	 * When a timestamp packet is encountered the backend code
	 * is stopped so that the front end has time to process packets
	 * that were accumulated in the traceID queue.  Since there can
	 * be more than one channel per cs_etm_queue, we need to specify
	 * what traceID queue needs servicing.
	 */
	etmq->pending_timestamp = trace_chan_id;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  172) static u64 cs_etm__etmq_get_timestamp(struct cs_etm_queue *etmq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  173) 				      u8 *trace_chan_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  174) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  175) 	struct cs_etm_packet_queue *packet_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  176) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  177) 	if (!etmq->pending_timestamp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  178) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  180) 	if (trace_chan_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  181) 		*trace_chan_id = etmq->pending_timestamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  183) 	packet_queue = cs_etm__etmq_get_packet_queue(etmq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  184) 						     etmq->pending_timestamp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  185) 	if (!packet_queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  186) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  187) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  188) 	/* Acknowledge pending status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  189) 	etmq->pending_timestamp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  191) 	/* See function cs_etm_decoder__do_{hard|soft}_timestamp() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  192) 	return packet_queue->timestamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  195) static void cs_etm__clear_packet_queue(struct cs_etm_packet_queue *queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  196) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  197) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  199) 	queue->head = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  200) 	queue->tail = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  201) 	queue->packet_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  202) 	for (i = 0; i < CS_ETM_PACKET_MAX_BUFFER; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  203) 		queue->packet_buffer[i].isa = CS_ETM_ISA_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  204) 		queue->packet_buffer[i].start_addr = CS_ETM_INVAL_ADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  205) 		queue->packet_buffer[i].end_addr = CS_ETM_INVAL_ADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  206) 		queue->packet_buffer[i].instr_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  207) 		queue->packet_buffer[i].last_instr_taken_branch = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  208) 		queue->packet_buffer[i].last_instr_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  209) 		queue->packet_buffer[i].last_instr_type = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  210) 		queue->packet_buffer[i].last_instr_subtype = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  211) 		queue->packet_buffer[i].last_instr_cond = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  212) 		queue->packet_buffer[i].flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  213) 		queue->packet_buffer[i].exception_number = UINT32_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  214) 		queue->packet_buffer[i].trace_chan_id = UINT8_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  215) 		queue->packet_buffer[i].cpu = INT_MIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  216) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  218) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  219) static void cs_etm__clear_all_packet_queues(struct cs_etm_queue *etmq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  220) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  221) 	int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  222) 	struct int_node *inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  223) 	struct cs_etm_traceid_queue *tidq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  224) 	struct intlist *traceid_queues_list = etmq->traceid_queues_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  226) 	intlist__for_each_entry(inode, traceid_queues_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  227) 		idx = (int)(intptr_t)inode->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  228) 		tidq = etmq->traceid_queues[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  229) 		cs_etm__clear_packet_queue(&tidq->packet_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  230) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  233) static int cs_etm__init_traceid_queue(struct cs_etm_queue *etmq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  234) 				      struct cs_etm_traceid_queue *tidq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  235) 				      u8 trace_chan_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  236) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  237) 	int rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  238) 	struct auxtrace_queue *queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  239) 	struct cs_etm_auxtrace *etm = etmq->etm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  241) 	cs_etm__clear_packet_queue(&tidq->packet_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  243) 	queue = &etmq->etm->queues.queue_array[etmq->queue_nr];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  244) 	tidq->tid = queue->tid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  245) 	tidq->pid = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  246) 	tidq->trace_chan_id = trace_chan_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  248) 	tidq->packet = zalloc(sizeof(struct cs_etm_packet));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  249) 	if (!tidq->packet)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  250) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  252) 	tidq->prev_packet = zalloc(sizeof(struct cs_etm_packet));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  253) 	if (!tidq->prev_packet)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  254) 		goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  256) 	if (etm->synth_opts.last_branch) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  257) 		size_t sz = sizeof(struct branch_stack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  259) 		sz += etm->synth_opts.last_branch_sz *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  260) 		      sizeof(struct branch_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  261) 		tidq->last_branch = zalloc(sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  262) 		if (!tidq->last_branch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  263) 			goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  264) 		tidq->last_branch_rb = zalloc(sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  265) 		if (!tidq->last_branch_rb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  266) 			goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  267) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  269) 	tidq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  270) 	if (!tidq->event_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  271) 		goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  272) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  273) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  274) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  275) out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  276) 	zfree(&tidq->last_branch_rb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  277) 	zfree(&tidq->last_branch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  278) 	zfree(&tidq->prev_packet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  279) 	zfree(&tidq->packet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  280) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  281) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  282) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  283) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  284) static struct cs_etm_traceid_queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  285) *cs_etm__etmq_get_traceid_queue(struct cs_etm_queue *etmq, u8 trace_chan_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  286) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  287) 	int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  288) 	struct int_node *inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  289) 	struct intlist *traceid_queues_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  290) 	struct cs_etm_traceid_queue *tidq, **traceid_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  291) 	struct cs_etm_auxtrace *etm = etmq->etm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  292) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  293) 	if (etm->timeless_decoding)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  294) 		trace_chan_id = CS_ETM_PER_THREAD_TRACEID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  296) 	traceid_queues_list = etmq->traceid_queues_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  298) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  299) 	 * Check if the traceid_queue exist for this traceID by looking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  300) 	 * in the queue list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  301) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  302) 	inode = intlist__find(traceid_queues_list, trace_chan_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  303) 	if (inode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  304) 		idx = (int)(intptr_t)inode->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  305) 		return etmq->traceid_queues[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  306) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  307) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  308) 	/* We couldn't find a traceid_queue for this traceID, allocate one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  309) 	tidq = malloc(sizeof(*tidq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  310) 	if (!tidq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  311) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  312) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  313) 	memset(tidq, 0, sizeof(*tidq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  315) 	/* Get a valid index for the new traceid_queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  316) 	idx = intlist__nr_entries(traceid_queues_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  317) 	/* Memory for the inode is free'ed in cs_etm_free_traceid_queues () */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  318) 	inode = intlist__findnew(traceid_queues_list, trace_chan_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  319) 	if (!inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  320) 		goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  322) 	/* Associate this traceID with this index */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  323) 	inode->priv = (void *)(intptr_t)idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  325) 	if (cs_etm__init_traceid_queue(etmq, tidq, trace_chan_id))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  326) 		goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  327) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  328) 	/* Grow the traceid_queues array by one unit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  329) 	traceid_queues = etmq->traceid_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  330) 	traceid_queues = reallocarray(traceid_queues,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  331) 				      idx + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  332) 				      sizeof(*traceid_queues));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  333) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  334) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  335) 	 * On failure reallocarray() returns NULL and the original block of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  336) 	 * memory is left untouched.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  337) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  338) 	if (!traceid_queues)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  339) 		goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  341) 	traceid_queues[idx] = tidq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  342) 	etmq->traceid_queues = traceid_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  343) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  344) 	return etmq->traceid_queues[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  346) out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  347) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  348) 	 * Function intlist__remove() removes the inode from the list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  349) 	 * and delete the memory associated to it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  350) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  351) 	intlist__remove(traceid_queues_list, inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  352) 	free(tidq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  353) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  354) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  356) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  357) struct cs_etm_packet_queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  358) *cs_etm__etmq_get_packet_queue(struct cs_etm_queue *etmq, u8 trace_chan_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  359) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  360) 	struct cs_etm_traceid_queue *tidq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  361) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  362) 	tidq = cs_etm__etmq_get_traceid_queue(etmq, trace_chan_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  363) 	if (tidq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  364) 		return &tidq->packet_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  365) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  366) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  369) static void cs_etm__packet_swap(struct cs_etm_auxtrace *etm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  370) 				struct cs_etm_traceid_queue *tidq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  371) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  372) 	struct cs_etm_packet *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  373) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  374) 	if (etm->sample_branches || etm->synth_opts.last_branch ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  375) 	    etm->sample_instructions) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  376) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  377) 		 * Swap PACKET with PREV_PACKET: PACKET becomes PREV_PACKET for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  378) 		 * the next incoming packet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  379) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  380) 		tmp = tidq->packet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  381) 		tidq->packet = tidq->prev_packet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  382) 		tidq->prev_packet = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  383) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  384) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  385) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  386) static void cs_etm__packet_dump(const char *pkt_string)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  387) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  388) 	const char *color = PERF_COLOR_BLUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  389) 	int len = strlen(pkt_string);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  391) 	if (len && (pkt_string[len-1] == '\n'))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  392) 		color_fprintf(stdout, color, "	%s", pkt_string);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  393) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  394) 		color_fprintf(stdout, color, "	%s\n", pkt_string);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  395) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  396) 	fflush(stdout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  398) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  399) static void cs_etm__set_trace_param_etmv3(struct cs_etm_trace_params *t_params,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  400) 					  struct cs_etm_auxtrace *etm, int idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  401) 					  u32 etmidr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  402) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  403) 	u64 **metadata = etm->metadata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  404) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  405) 	t_params[idx].protocol = cs_etm__get_v7_protocol_version(etmidr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406) 	t_params[idx].etmv3.reg_ctrl = metadata[idx][CS_ETM_ETMCR];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  407) 	t_params[idx].etmv3.reg_trc_id = metadata[idx][CS_ETM_ETMTRACEIDR];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  409) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  410) static void cs_etm__set_trace_param_etmv4(struct cs_etm_trace_params *t_params,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  411) 					  struct cs_etm_auxtrace *etm, int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  412) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  413) 	u64 **metadata = etm->metadata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415) 	t_params[idx].protocol = CS_ETM_PROTO_ETMV4i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  416) 	t_params[idx].etmv4.reg_idr0 = metadata[idx][CS_ETMV4_TRCIDR0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  417) 	t_params[idx].etmv4.reg_idr1 = metadata[idx][CS_ETMV4_TRCIDR1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  418) 	t_params[idx].etmv4.reg_idr2 = metadata[idx][CS_ETMV4_TRCIDR2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  419) 	t_params[idx].etmv4.reg_idr8 = metadata[idx][CS_ETMV4_TRCIDR8];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  420) 	t_params[idx].etmv4.reg_configr = metadata[idx][CS_ETMV4_TRCCONFIGR];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  421) 	t_params[idx].etmv4.reg_traceidr = metadata[idx][CS_ETMV4_TRCTRACEIDR];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  422) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  423) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  424) static int cs_etm__init_trace_params(struct cs_etm_trace_params *t_params,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425) 				     struct cs_etm_auxtrace *etm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  426) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428) 	u32 etmidr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429) 	u64 architecture;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) 	for (i = 0; i < etm->num_cpu; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) 		architecture = etm->metadata[i][CS_ETM_MAGIC];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) 		switch (architecture) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) 		case __perf_cs_etmv3_magic:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) 			etmidr = etm->metadata[i][CS_ETM_ETMIDR];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) 			cs_etm__set_trace_param_etmv3(t_params, etm, i, etmidr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) 		case __perf_cs_etmv4_magic:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) 			cs_etm__set_trace_param_etmv4(t_params, etm, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) static int cs_etm__init_decoder_params(struct cs_etm_decoder_params *d_params,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 				       struct cs_etm_queue *etmq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 				       enum cs_etm_decoder_operation mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 	int ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 	if (!(mode < CS_ETM_OPERATION_MAX))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 	d_params->packet_printer = cs_etm__packet_dump;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 	d_params->operation = mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 	d_params->data = etmq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 	d_params->formatted = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 	d_params->fsyncs = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 	d_params->hsyncs = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 	d_params->frame_aligned = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 	ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) static void cs_etm__dump_event(struct cs_etm_auxtrace *etm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 			       struct auxtrace_buffer *buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 	const char *color = PERF_COLOR_BLUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 	struct cs_etm_decoder_params d_params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 	struct cs_etm_trace_params *t_params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 	struct cs_etm_decoder *decoder;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 	size_t buffer_used = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 	fprintf(stdout, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 	color_fprintf(stdout, color,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 		     ". ... CoreSight ETM Trace data: size %zu bytes\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 		     buffer->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 	/* Use metadata to fill in trace parameters for trace decoder */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 	t_params = zalloc(sizeof(*t_params) * etm->num_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 	if (!t_params)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 	if (cs_etm__init_trace_params(t_params, etm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 		goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 	/* Set decoder parameters to simply print the trace packets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 	if (cs_etm__init_decoder_params(&d_params, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 					CS_ETM_OPERATION_PRINT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 		goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 	decoder = cs_etm_decoder__new(etm->num_cpu, &d_params, t_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 	if (!decoder)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 		goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 		size_t consumed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 		ret = cs_etm_decoder__process_data_block(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 				decoder, buffer->offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 				&((u8 *)buffer->data)[buffer_used],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 				buffer->size - buffer_used, &consumed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 		buffer_used += consumed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 	} while (buffer_used < buffer->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 	cs_etm_decoder__free(decoder);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 	zfree(&t_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) static int cs_etm__flush_events(struct perf_session *session,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 				struct perf_tool *tool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 	struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 						   struct cs_etm_auxtrace,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 						   auxtrace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 	if (dump_trace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 	if (!tool->ordered_events)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 	ret = cs_etm__update_queues(etm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 	if (etm->timeless_decoding)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 		return cs_etm__process_timeless_queues(etm, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 	return cs_etm__process_queues(etm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) static void cs_etm__free_traceid_queues(struct cs_etm_queue *etmq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 	int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 	uintptr_t priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 	struct int_node *inode, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 	struct cs_etm_traceid_queue *tidq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 	struct intlist *traceid_queues_list = etmq->traceid_queues_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 	intlist__for_each_entry_safe(inode, tmp, traceid_queues_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 		priv = (uintptr_t)inode->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 		idx = priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 		/* Free this traceid_queue from the array */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 		tidq = etmq->traceid_queues[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 		thread__zput(tidq->thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 		zfree(&tidq->event_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 		zfree(&tidq->last_branch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 		zfree(&tidq->last_branch_rb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 		zfree(&tidq->prev_packet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 		zfree(&tidq->packet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 		zfree(&tidq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 		 * Function intlist__remove() removes the inode from the list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 		 * and delete the memory associated to it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 		intlist__remove(traceid_queues_list, inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 	/* Then the RB tree itself */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 	intlist__delete(traceid_queues_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 	etmq->traceid_queues_list = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 	/* finally free the traceid_queues array */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 	zfree(&etmq->traceid_queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) static void cs_etm__free_queue(void *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 	struct cs_etm_queue *etmq = priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 	if (!etmq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 	cs_etm_decoder__free(etmq->decoder);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 	cs_etm__free_traceid_queues(etmq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 	free(etmq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) static void cs_etm__free_events(struct perf_session *session)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 	struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 						   struct cs_etm_auxtrace,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 						   auxtrace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 	struct auxtrace_queues *queues = &aux->queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 	for (i = 0; i < queues->nr_queues; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 		cs_etm__free_queue(queues->queue_array[i].priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 		queues->queue_array[i].priv = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 	auxtrace_queues__free(queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) static void cs_etm__free(struct perf_session *session)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 	struct int_node *inode, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 	struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 						   struct cs_etm_auxtrace,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 						   auxtrace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 	cs_etm__free_events(session);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 	session->auxtrace = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 	/* First remove all traceID/metadata nodes for the RB tree */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 	intlist__for_each_entry_safe(inode, tmp, traceid_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 		intlist__remove(traceid_list, inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 	/* Then the RB tree itself */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 	intlist__delete(traceid_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 	for (i = 0; i < aux->num_cpu; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 		zfree(&aux->metadata[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 	thread__zput(aux->unknown_thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 	zfree(&aux->metadata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 	zfree(&aux);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) static bool cs_etm__evsel_is_auxtrace(struct perf_session *session,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 				      struct evsel *evsel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 	struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 						   struct cs_etm_auxtrace,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 						   auxtrace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 	return evsel->core.attr.type == aux->pmu_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) static u8 cs_etm__cpu_mode(struct cs_etm_queue *etmq, u64 address)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 	struct machine *machine;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 	machine = etmq->etm->machine;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 	if (address >= etmq->etm->kernel_start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 		if (machine__is_host(machine))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 			return PERF_RECORD_MISC_KERNEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 			return PERF_RECORD_MISC_GUEST_KERNEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 		if (machine__is_host(machine))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 			return PERF_RECORD_MISC_USER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 		else if (perf_guest)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 			return PERF_RECORD_MISC_GUEST_USER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 			return PERF_RECORD_MISC_HYPERVISOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) static u32 cs_etm__mem_access(struct cs_etm_queue *etmq, u8 trace_chan_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 			      u64 address, size_t size, u8 *buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 	u8  cpumode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 	u64 offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 	int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 	struct thread *thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 	struct machine *machine;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 	struct addr_location al;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 	struct cs_etm_traceid_queue *tidq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 	if (!etmq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 	machine = etmq->etm->machine;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 	cpumode = cs_etm__cpu_mode(etmq, address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 	tidq = cs_etm__etmq_get_traceid_queue(etmq, trace_chan_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 	if (!tidq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 	thread = tidq->thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 	if (!thread) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 		if (cpumode != PERF_RECORD_MISC_KERNEL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 		thread = etmq->etm->unknown_thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 	if (!thread__find_map(thread, cpumode, address, &al) || !al.map->dso)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 	if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 	    dso__data_status_seen(al.map->dso, DSO_DATA_STATUS_SEEN_ITRACE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 	offset = al.map->map_ip(al.map, address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 	map__load(al.map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 	len = dso__data_read_offset(al.map->dso, machine, offset, buffer, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 	if (len <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 	return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 	struct cs_etm_decoder_params d_params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 	struct cs_etm_trace_params  *t_params = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 	struct cs_etm_queue *etmq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 	etmq = zalloc(sizeof(*etmq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 	if (!etmq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 	etmq->traceid_queues_list = intlist__new(NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 	if (!etmq->traceid_queues_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 		goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 	/* Use metadata to fill in trace parameters for trace decoder */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 	t_params = zalloc(sizeof(*t_params) * etm->num_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 	if (!t_params)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 		goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 	if (cs_etm__init_trace_params(t_params, etm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 		goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 	/* Set decoder parameters to decode trace packets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 	if (cs_etm__init_decoder_params(&d_params, etmq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 					CS_ETM_OPERATION_DECODE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 		goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 	etmq->decoder = cs_etm_decoder__new(etm->num_cpu, &d_params, t_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 	if (!etmq->decoder)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 		goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	 * Register a function to handle all memory accesses required by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 	 * the trace decoder library.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	if (cs_etm_decoder__add_mem_access_cb(etmq->decoder,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 					      0x0L, ((u64) -1L),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 					      cs_etm__mem_access))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 		goto out_free_decoder;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 	zfree(&t_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 	return etmq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) out_free_decoder:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	cs_etm_decoder__free(etmq->decoder);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	intlist__delete(etmq->traceid_queues_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 	free(etmq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) static int cs_etm__setup_queue(struct cs_etm_auxtrace *etm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 			       struct auxtrace_queue *queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 			       unsigned int queue_nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 	unsigned int cs_queue_nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 	u8 trace_chan_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	u64 timestamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	struct cs_etm_queue *etmq = queue->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	if (list_empty(&queue->head) || etmq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	etmq = cs_etm__alloc_queue(etm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	if (!etmq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	queue->priv = etmq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	etmq->etm = etm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	etmq->queue_nr = queue_nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	etmq->offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	if (etm->timeless_decoding)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	 * We are under a CPU-wide trace scenario.  As such we need to know
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	 * when the code that generated the traces started to execute so that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	 * it can be correlated with execution on other CPUs.  So we get a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 	 * handle on the beginning of traces and decode until we find a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	 * timestamp.  The timestamp is then added to the auxtrace min heap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	 * in order to know what nibble (of all the etmqs) to decode first.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 		 * Fetch an aux_buffer from this etmq.  Bail if no more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 		 * blocks or an error has been encountered.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 		ret = cs_etm__get_data_block(etmq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 		if (ret <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 		 * Run decoder on the trace block.  The decoder will stop when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 		 * encountering a timestamp, a full packet queue or the end of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 		 * trace for that block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 		ret = cs_etm__decode_data_block(etmq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 		 * Function cs_etm_decoder__do_{hard|soft}_timestamp() does all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 		 * the timestamp calculation for us.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 		timestamp = cs_etm__etmq_get_timestamp(etmq, &trace_chan_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 		/* We found a timestamp, no need to continue. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 		if (timestamp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 		 * We didn't find a timestamp so empty all the traceid packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 		 * queues before looking for another timestamp packet, either
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 		 * in the current data block or a new one.  Packets that were
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 		 * just decoded are useless since no timestamp has been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 		 * associated with them.  As such simply discard them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 		cs_etm__clear_all_packet_queues(etmq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	 * We have a timestamp.  Add it to the min heap to reflect when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	 * instructions conveyed by the range packets of this traceID queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	 * started to execute.  Once the same has been done for all the traceID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	 * queues of each etmq, redenring and decoding can start in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	 * chronological order.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	 * Note that packets decoded above are still in the traceID's packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	 * queue and will be processed in cs_etm__process_queues().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	cs_queue_nr = TO_CS_QUEUE_NR(queue_nr, trace_chan_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	ret = auxtrace_heap__add(&etm->heap, cs_queue_nr, timestamp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) static int cs_etm__setup_queues(struct cs_etm_auxtrace *etm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	if (!etm->kernel_start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 		etm->kernel_start = machine__kernel_start(etm->machine);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	for (i = 0; i < etm->queues.nr_queues; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 		ret = cs_etm__setup_queue(etm, &etm->queues.queue_array[i], i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) static int cs_etm__update_queues(struct cs_etm_auxtrace *etm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	if (etm->queues.new_data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 		etm->queues.new_data = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 		return cs_etm__setup_queues(etm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) static inline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) void cs_etm__copy_last_branch_rb(struct cs_etm_queue *etmq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 				 struct cs_etm_traceid_queue *tidq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	struct branch_stack *bs_src = tidq->last_branch_rb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	struct branch_stack *bs_dst = tidq->last_branch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	size_t nr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	 * Set the number of records before early exit: ->nr is used to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	 * determine how many branches to copy from ->entries.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 	bs_dst->nr = bs_src->nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	 * Early exit when there is nothing to copy.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	if (!bs_src->nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	 * As bs_src->entries is a circular buffer, we need to copy from it in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	 * two steps.  First, copy the branches from the most recently inserted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	 * branch ->last_branch_pos until the end of bs_src->entries buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	nr = etmq->etm->synth_opts.last_branch_sz - tidq->last_branch_pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	memcpy(&bs_dst->entries[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	       &bs_src->entries[tidq->last_branch_pos],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	       sizeof(struct branch_entry) * nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	 * If we wrapped around at least once, the branches from the beginning
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	 * of the bs_src->entries buffer and until the ->last_branch_pos element
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	 * are older valid branches: copy them over.  The total number of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	 * branches copied over will be equal to the number of branches asked by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	 * the user in last_branch_sz.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	if (bs_src->nr >= etmq->etm->synth_opts.last_branch_sz) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 		memcpy(&bs_dst->entries[nr],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 		       &bs_src->entries[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 		       sizeof(struct branch_entry) * tidq->last_branch_pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) static inline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) void cs_etm__reset_last_branch_rb(struct cs_etm_traceid_queue *tidq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	tidq->last_branch_pos = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	tidq->last_branch_rb->nr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) static inline int cs_etm__t32_instr_size(struct cs_etm_queue *etmq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 					 u8 trace_chan_id, u64 addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	u8 instrBytes[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	cs_etm__mem_access(etmq, trace_chan_id, addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 			   ARRAY_SIZE(instrBytes), instrBytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	 * T32 instruction size is indicated by bits[15:11] of the first
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	 * 16-bit word of the instruction: 0b11101, 0b11110 and 0b11111
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	 * denote a 32-bit instruction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	return ((instrBytes[1] & 0xF8) >= 0xE8) ? 4 : 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) static inline u64 cs_etm__first_executed_instr(struct cs_etm_packet *packet)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	/* Returns 0 for the CS_ETM_DISCONTINUITY packet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	if (packet->sample_type == CS_ETM_DISCONTINUITY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	return packet->start_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) static inline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) u64 cs_etm__last_executed_instr(const struct cs_etm_packet *packet)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	/* Returns 0 for the CS_ETM_DISCONTINUITY packet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	if (packet->sample_type == CS_ETM_DISCONTINUITY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	return packet->end_addr - packet->last_instr_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) static inline u64 cs_etm__instr_addr(struct cs_etm_queue *etmq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 				     u64 trace_chan_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 				     const struct cs_etm_packet *packet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 				     u64 offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	if (packet->isa == CS_ETM_ISA_T32) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 		u64 addr = packet->start_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 		while (offset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 			addr += cs_etm__t32_instr_size(etmq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 						       trace_chan_id, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 			offset--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 		return addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	/* Assume a 4 byte instruction size (A32/A64) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	return packet->start_addr + offset * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) static void cs_etm__update_last_branch_rb(struct cs_etm_queue *etmq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 					  struct cs_etm_traceid_queue *tidq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	struct branch_stack *bs = tidq->last_branch_rb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	struct branch_entry *be;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	 * The branches are recorded in a circular buffer in reverse
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	 * chronological order: we start recording from the last element of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	 * buffer down.  After writing the first element of the stack, move the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	 * insert position back to the end of the buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	if (!tidq->last_branch_pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 		tidq->last_branch_pos = etmq->etm->synth_opts.last_branch_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	tidq->last_branch_pos -= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	be       = &bs->entries[tidq->last_branch_pos];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	be->from = cs_etm__last_executed_instr(tidq->prev_packet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	be->to	 = cs_etm__first_executed_instr(tidq->packet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	/* No support for mispredict */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	be->flags.mispred = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	be->flags.predicted = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	 * Increment bs->nr until reaching the number of last branches asked by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	 * the user on the command line.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	if (bs->nr < etmq->etm->synth_opts.last_branch_sz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 		bs->nr += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) static int cs_etm__inject_event(union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 			       struct perf_sample *sample, u64 type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 	event->header.size = perf_event__sample_event_size(sample, type, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 	return perf_event__synthesize_sample(event, type, 0, sample);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 
/*
 * Advance this queue to its next auxtrace buffer and point etmq->buf at
 * that buffer's trace data.
 *
 * Returns the number of bytes now available in etmq->buf, 0 when the
 * queue is exhausted, or -ENOMEM if the buffer's data could not be
 * loaded.  Note the previous buffer is only dropped once the next one's
 * data is secured (or the queue is known to be empty).
 */
static int
cs_etm__get_trace(struct cs_etm_queue *etmq)
{
	struct auxtrace_buffer *aux_buffer = etmq->buffer;
	struct auxtrace_buffer *old_buffer = aux_buffer;
	struct auxtrace_queue *queue;

	queue = &etmq->etm->queues.queue_array[etmq->queue_nr];

	aux_buffer = auxtrace_buffer__next(queue, aux_buffer);

	/* If no more data, drop the previous auxtrace_buffer and return */
	if (!aux_buffer) {
		if (old_buffer)
			auxtrace_buffer__drop_data(old_buffer);
		etmq->buf_len = 0;
		return 0;
	}

	etmq->buffer = aux_buffer;

	/* If the aux_buffer doesn't have data associated, try to load it */
	if (!aux_buffer->data) {
		/* get the file desc associated with the perf data file */
		int fd = perf_data__fd(etmq->etm->session->data);

		aux_buffer->data = auxtrace_buffer__get_data(aux_buffer, fd);
		if (!aux_buffer->data)
			return -ENOMEM;
	}

	/* If valid, drop the previous buffer */
	if (old_buffer)
		auxtrace_buffer__drop_data(old_buffer);

	/* Expose the fresh buffer; decoding restarts from offset 0. */
	etmq->buf_used = 0;
	etmq->buf_len = aux_buffer->size;
	etmq->buf = aux_buffer->data;

	return etmq->buf_len;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) static void cs_etm__set_pid_tid_cpu(struct cs_etm_auxtrace *etm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 				    struct cs_etm_traceid_queue *tidq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	if ((!tidq->thread) && (tidq->tid != -1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 		tidq->thread = machine__find_thread(etm->machine, -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 						    tidq->tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	if (tidq->thread)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 		tidq->pid = tidq->thread->pid_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) int cs_etm__etmq_set_tid(struct cs_etm_queue *etmq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 			 pid_t tid, u8 trace_chan_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 	int cpu, err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	struct cs_etm_auxtrace *etm = etmq->etm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	struct cs_etm_traceid_queue *tidq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 	tidq = cs_etm__etmq_get_traceid_queue(etmq, trace_chan_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	if (!tidq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	if (cs_etm__get_cpu(trace_chan_id, &cpu) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	err = machine__set_current_tid(etm->machine, cpu, tid, tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	tidq->tid = tid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	thread__zput(tidq->thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	cs_etm__set_pid_tid_cpu(etm, tidq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) bool cs_etm__etmq_is_timeless(struct cs_etm_queue *etmq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	return !!etmq->etm->timeless_decoding;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) static void cs_etm__copy_insn(struct cs_etm_queue *etmq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 			      u64 trace_chan_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 			      const struct cs_etm_packet *packet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 			      struct perf_sample *sample)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	 * It's pointless to read instructions for the CS_ETM_DISCONTINUITY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	 * packet, so directly bail out with 'insn_len' = 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	if (packet->sample_type == CS_ETM_DISCONTINUITY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 		sample->insn_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	 * T32 instruction size might be 32-bit or 16-bit, decide by calling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	 * cs_etm__t32_instr_size().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	if (packet->isa == CS_ETM_ISA_T32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 		sample->insn_len = cs_etm__t32_instr_size(etmq, trace_chan_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 							  sample->ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	/* Otherwise, A64 and A32 instruction size are always 32-bit. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 		sample->insn_len = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	cs_etm__mem_access(etmq, trace_chan_id, sample->ip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 			   sample->insn_len, (void *)sample->insn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 
/*
 * Build and deliver a synthesized "instructions" sample covering @period
 * instructions ending at @addr, attributed to the traceID queue's
 * pid/tid/cpu.
 *
 * Returns 0 on success or the error from event injection/delivery.
 */
static int cs_etm__synth_instruction_sample(struct cs_etm_queue *etmq,
					    struct cs_etm_traceid_queue *tidq,
					    u64 addr, u64 period)
{
	int ret = 0;
	struct cs_etm_auxtrace *etm = etmq->etm;
	union perf_event *event = tidq->event_buf;
	struct perf_sample sample = {.ip = 0,};

	event->sample.header.type = PERF_RECORD_SAMPLE;
	/* Derive user/kernel cpumode from the sampled address. */
	event->sample.header.misc = cs_etm__cpu_mode(etmq, addr);
	event->sample.header.size = sizeof(struct perf_event_header);

	sample.ip = addr;
	sample.pid = tidq->pid;
	sample.tid = tidq->tid;
	/* Both id and stream_id use the synthesized "instructions" event id. */
	sample.id = etmq->etm->instructions_id;
	sample.stream_id = etmq->etm->instructions_id;
	sample.period = period;
	sample.cpu = tidq->packet->cpu;
	/* Flags describe the branch that ended the previous range. */
	sample.flags = tidq->prev_packet->flags;
	sample.cpumode = event->sample.header.misc;

	/* Read back the instruction bytes at sample.ip. */
	cs_etm__copy_insn(etmq, tidq->trace_chan_id, tidq->packet, &sample);

	/* Attach the flattened last-branch stack when it was requested. */
	if (etm->synth_opts.last_branch)
		sample.branch_stack = tidq->last_branch;

	/* In inject mode, serialise the sample into the event body first. */
	if (etm->synth_opts.inject) {
		ret = cs_etm__inject_event(event, &sample,
					   etm->instructions_sample_type);
		if (ret)
			return ret;
	}

	ret = perf_session__deliver_synth_event(etm->session, event, &sample);

	if (ret)
		pr_err(
			"CS ETM Trace: failed to deliver instruction event, error %d\n",
			ret);

	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)  * The cs etm packet encodes an instruction range between a branch target
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)  * and the next taken branch. Generate sample accordingly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) static int cs_etm__synth_branch_sample(struct cs_etm_queue *etmq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 				       struct cs_etm_traceid_queue *tidq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	struct cs_etm_auxtrace *etm = etmq->etm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 	struct perf_sample sample = {.ip = 0,};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 	union perf_event *event = tidq->event_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	struct dummy_branch_stack {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 		u64			nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 		u64			hw_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 		struct branch_entry	entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 	} dummy_bs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	u64 ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	ip = cs_etm__last_executed_instr(tidq->prev_packet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 	event->sample.header.type = PERF_RECORD_SAMPLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	event->sample.header.misc = cs_etm__cpu_mode(etmq, ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	event->sample.header.size = sizeof(struct perf_event_header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	sample.ip = ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	sample.pid = tidq->pid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	sample.tid = tidq->tid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	sample.addr = cs_etm__first_executed_instr(tidq->packet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	sample.id = etmq->etm->branches_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 	sample.stream_id = etmq->etm->branches_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 	sample.period = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	sample.cpu = tidq->packet->cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	sample.flags = tidq->prev_packet->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	sample.cpumode = event->sample.header.misc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	cs_etm__copy_insn(etmq, tidq->trace_chan_id, tidq->prev_packet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 			  &sample);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 	 * perf report cannot handle events without a branch stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	if (etm->synth_opts.last_branch) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 		dummy_bs = (struct dummy_branch_stack){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 			.nr = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 			.hw_idx = -1ULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 			.entries = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 				.from = sample.ip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 				.to = sample.addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 			},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 		};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 		sample.branch_stack = (struct branch_stack *)&dummy_bs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	if (etm->synth_opts.inject) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 		ret = cs_etm__inject_event(event, &sample,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 					   etm->branches_sample_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	ret = perf_session__deliver_synth_event(etm->session, event, &sample);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 		pr_err(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 		"CS ETM Trace: failed to deliver instruction event, error %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 		ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 
/*
 * Glue for cs_etm__synth_event(): pairs the dummy tool handed to
 * perf_event__synthesize_attr() with the session to deliver into, so
 * cs_etm__event_synth() can recover the session via container_of().
 */
struct cs_etm_synth {
	struct perf_tool dummy_tool;
	struct perf_session *session;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) static int cs_etm__event_synth(struct perf_tool *tool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 			       union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 			       struct perf_sample *sample __maybe_unused,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 			       struct machine *machine __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	struct cs_etm_synth *cs_etm_synth =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 		      container_of(tool, struct cs_etm_synth, dummy_tool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	return perf_session__deliver_synth_event(cs_etm_synth->session,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 						 event, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) static int cs_etm__synth_event(struct perf_session *session,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 			       struct perf_event_attr *attr, u64 id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	struct cs_etm_synth cs_etm_synth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	memset(&cs_etm_synth, 0, sizeof(struct cs_etm_synth));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	cs_etm_synth.session = session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	return perf_event__synthesize_attr(&cs_etm_synth.dummy_tool, attr, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 					   &id, cs_etm__event_synth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) static int cs_etm__synth_events(struct cs_etm_auxtrace *etm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 				struct perf_session *session)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	struct evlist *evlist = session->evlist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	struct evsel *evsel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 	struct perf_event_attr attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	bool found = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 	u64 id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	evlist__for_each_entry(evlist, evsel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 		if (evsel->core.attr.type == etm->pmu_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 			found = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	if (!found) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 		pr_debug("No selected events with CoreSight Trace data\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 	memset(&attr, 0, sizeof(struct perf_event_attr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	attr.size = sizeof(struct perf_event_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	attr.type = PERF_TYPE_HARDWARE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 	attr.sample_type = evsel->core.attr.sample_type & PERF_SAMPLE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 	attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 			    PERF_SAMPLE_PERIOD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 	if (etm->timeless_decoding)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 		attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 		attr.sample_type |= PERF_SAMPLE_TIME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	attr.exclude_user = evsel->core.attr.exclude_user;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 	attr.exclude_kernel = evsel->core.attr.exclude_kernel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 	attr.exclude_hv = evsel->core.attr.exclude_hv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	attr.exclude_host = evsel->core.attr.exclude_host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 	attr.exclude_guest = evsel->core.attr.exclude_guest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 	attr.sample_id_all = evsel->core.attr.sample_id_all;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 	attr.read_format = evsel->core.attr.read_format;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 	/* create new id val to be a fixed offset from evsel id */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 	id = evsel->core.id[0] + 1000000000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 	if (!id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 		id = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 	if (etm->synth_opts.branches) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 		attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 		attr.sample_period = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 		attr.sample_type |= PERF_SAMPLE_ADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 		err = cs_etm__synth_event(session, &attr, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 		etm->sample_branches = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 		etm->branches_sample_type = attr.sample_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 		etm->branches_id = id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 		id += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 		attr.sample_type &= ~(u64)PERF_SAMPLE_ADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 	if (etm->synth_opts.last_branch) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 		attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 		 * We don't use the hardware index, but the sample generation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 		 * code uses the new format branch_stack with this field,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 		 * so the event attributes must indicate that it's present.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 		attr.branch_sample_type |= PERF_SAMPLE_BRANCH_HW_INDEX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 	if (etm->synth_opts.instructions) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 		attr.sample_period = etm->synth_opts.period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 		etm->instructions_sample_period = attr.sample_period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 		err = cs_etm__synth_event(session, &attr, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 		etm->sample_instructions = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 		etm->instructions_sample_type = attr.sample_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 		etm->instructions_id = id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 		id += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) static int cs_etm__sample(struct cs_etm_queue *etmq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 			  struct cs_etm_traceid_queue *tidq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 	struct cs_etm_auxtrace *etm = etmq->etm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 	u8 trace_chan_id = tidq->trace_chan_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 	u64 instrs_prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 	/* Get instructions remainder from previous packet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 	instrs_prev = tidq->period_instructions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 	tidq->period_instructions += tidq->packet->instr_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	 * Record a branch when the last instruction in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 	 * PREV_PACKET is a branch.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 	if (etm->synth_opts.last_branch &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	    tidq->prev_packet->sample_type == CS_ETM_RANGE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 	    tidq->prev_packet->last_instr_taken_branch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 		cs_etm__update_last_branch_rb(etmq, tidq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 	if (etm->sample_instructions &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	    tidq->period_instructions >= etm->instructions_sample_period) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 		 * Emit instruction sample periodically
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 		 * TODO: allow period to be defined in cycles and clock time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 		 * Below diagram demonstrates the instruction samples
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 		 * generation flows:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 		 *    Instrs     Instrs       Instrs       Instrs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 		 *   Sample(n)  Sample(n+1)  Sample(n+2)  Sample(n+3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 		 *    |            |            |            |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 		 *    V            V            V            V
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 		 *   --------------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 		 *            ^                                  ^
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 		 *            |                                  |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 		 *         Period                             Period
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 		 *    instructions(Pi)                   instructions(Pi')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 		 *            |                                  |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 		 *            \---------------- -----------------/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 		 *                             V
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 		 *                 tidq->packet->instr_count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 		 * Instrs Sample(n...) are the synthesised samples occurring
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 		 * every etm->instructions_sample_period instructions - as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 		 * defined on the perf command line.  Sample(n) is being the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 		 * last sample before the current etm packet, n+1 to n+3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 		 * samples are generated from the current etm packet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 		 * tidq->packet->instr_count represents the number of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 		 * instructions in the current etm packet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 		 * Period instructions (Pi) contains the the number of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 		 * instructions executed after the sample point(n) from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 		 * previous etm packet.  This will always be less than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 		 * etm->instructions_sample_period.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 		 * When generate new samples, it combines with two parts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 		 * instructions, one is the tail of the old packet and another
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 		 * is the head of the new coming packet, to generate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 		 * sample(n+1); sample(n+2) and sample(n+3) consume the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 		 * instructions with sample period.  After sample(n+3), the rest
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 		 * instructions will be used by later packet and it is assigned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 		 * to tidq->period_instructions for next round calculation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 		 * Get the initial offset into the current packet instructions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 		 * entry conditions ensure that instrs_prev is less than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 		 * etm->instructions_sample_period.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 		u64 offset = etm->instructions_sample_period - instrs_prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 		u64 addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 		/* Prepare last branches for instruction sample */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 		if (etm->synth_opts.last_branch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 			cs_etm__copy_last_branch_rb(etmq, tidq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 		while (tidq->period_instructions >=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 				etm->instructions_sample_period) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 			 * Calculate the address of the sampled instruction (-1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 			 * as sample is reported as though instruction has just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 			 * been executed, but PC has not advanced to next
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 			 * instruction)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 			addr = cs_etm__instr_addr(etmq, trace_chan_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 						  tidq->packet, offset - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 			ret = cs_etm__synth_instruction_sample(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 				etmq, tidq, addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 				etm->instructions_sample_period);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 			if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 				return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 			offset += etm->instructions_sample_period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 			tidq->period_instructions -=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 				etm->instructions_sample_period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 	if (etm->sample_branches) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 		bool generate_sample = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 		/* Generate sample for tracing on packet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 		if (tidq->prev_packet->sample_type == CS_ETM_DISCONTINUITY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 			generate_sample = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 		/* Generate sample for branch taken packet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 		if (tidq->prev_packet->sample_type == CS_ETM_RANGE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 		    tidq->prev_packet->last_instr_taken_branch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 			generate_sample = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 		if (generate_sample) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 			ret = cs_etm__synth_branch_sample(etmq, tidq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 			if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 				return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 	cs_etm__packet_swap(etm, tidq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) static int cs_etm__exception(struct cs_etm_traceid_queue *tidq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 	 * When the exception packet is inserted, whether the last instruction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 	 * in previous range packet is taken branch or not, we need to force
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 	 * to set 'prev_packet->last_instr_taken_branch' to true.  This ensures
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 	 * to generate branch sample for the instruction range before the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 	 * exception is trapped to kernel or before the exception returning.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 	 * The exception packet includes the dummy address values, so don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 	 * swap PACKET with PREV_PACKET.  This keeps PREV_PACKET to be useful
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 	 * for generating instruction and branch samples.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 	if (tidq->prev_packet->sample_type == CS_ETM_RANGE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 		tidq->prev_packet->last_instr_taken_branch = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) static int cs_etm__flush(struct cs_etm_queue *etmq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 			 struct cs_etm_traceid_queue *tidq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 	struct cs_etm_auxtrace *etm = etmq->etm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 	/* Handle start tracing packet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 	if (tidq->prev_packet->sample_type == CS_ETM_EMPTY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 		goto swap_packet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 	if (etmq->etm->synth_opts.last_branch &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 	    tidq->prev_packet->sample_type == CS_ETM_RANGE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 		u64 addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 		/* Prepare last branches for instruction sample */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 		cs_etm__copy_last_branch_rb(etmq, tidq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 		 * Generate a last branch event for the branches left in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 		 * circular buffer at the end of the trace.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 		 * Use the address of the end of the last reported execution
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 		 * range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 		addr = cs_etm__last_executed_instr(tidq->prev_packet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 		err = cs_etm__synth_instruction_sample(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 			etmq, tidq, addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 			tidq->period_instructions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 		tidq->period_instructions = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 	if (etm->sample_branches &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 	    tidq->prev_packet->sample_type == CS_ETM_RANGE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 		err = cs_etm__synth_branch_sample(etmq, tidq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) swap_packet:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 	cs_etm__packet_swap(etm, tidq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 	/* Reset last branches after flush the trace */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 	if (etm->synth_opts.last_branch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 		cs_etm__reset_last_branch_rb(tidq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) static int cs_etm__end_block(struct cs_etm_queue *etmq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 			     struct cs_etm_traceid_queue *tidq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 	 * It has no new packet coming and 'etmq->packet' contains the stale
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 	 * packet which was set at the previous time with packets swapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 	 * so skip to generate branch sample to avoid stale packet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 	 * For this case only flush branch stack and generate a last branch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 	 * event for the branches left in the circular buffer at the end of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 	 * the trace.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 	if (etmq->etm->synth_opts.last_branch &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 	    tidq->prev_packet->sample_type == CS_ETM_RANGE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 		u64 addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 		/* Prepare last branches for instruction sample */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 		cs_etm__copy_last_branch_rb(etmq, tidq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 		 * Use the address of the end of the last reported execution
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 		 * range.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 		addr = cs_etm__last_executed_instr(tidq->prev_packet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 		err = cs_etm__synth_instruction_sample(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 			etmq, tidq, addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 			tidq->period_instructions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 		tidq->period_instructions = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613)  * cs_etm__get_data_block: Fetch a block from the auxtrace_buffer queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614)  *			   if need be.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615)  * Returns:	< 0	if error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616)  *		= 0	if no more auxtrace_buffer to read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617)  *		> 0	if the current buffer isn't empty yet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) static int cs_etm__get_data_block(struct cs_etm_queue *etmq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 	if (!etmq->buf_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 		ret = cs_etm__get_trace(etmq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 		if (ret <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 		 * We cannot assume consecutive blocks in the data file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 		 * are contiguous, reset the decoder to force re-sync.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 		ret = cs_etm_decoder__reset(etmq->decoder);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 	return etmq->buf_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) static bool cs_etm__is_svc_instr(struct cs_etm_queue *etmq, u8 trace_chan_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 				 struct cs_etm_packet *packet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 				 u64 end_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 	/* Initialise to keep compiler happy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 	u16 instr16 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 	u32 instr32 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 	u64 addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 	switch (packet->isa) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 	case CS_ETM_ISA_T32:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 		 * The SVC of T32 is defined in ARM DDI 0487D.a, F5.1.247:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 		 *  b'15         b'8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 		 * +-----------------+--------+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 		 * | 1 1 0 1 1 1 1 1 |  imm8  |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 		 * +-----------------+--------+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 		 * According to the specifiction, it only defines SVC for T32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 		 * with 16 bits instruction and has no definition for 32bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 		 * so below only read 2 bytes as instruction size for T32.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 		addr = end_addr - 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 		cs_etm__mem_access(etmq, trace_chan_id, addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 				   sizeof(instr16), (u8 *)&instr16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 		if ((instr16 & 0xFF00) == 0xDF00)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 	case CS_ETM_ISA_A32:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 		 * The SVC of A32 is defined in ARM DDI 0487D.a, F5.1.247:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 		 *  b'31 b'28 b'27 b'24
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 		 * +---------+---------+-------------------------+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 		 * |  !1111  | 1 1 1 1 |        imm24            |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 		 * +---------+---------+-------------------------+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 		addr = end_addr - 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 		cs_etm__mem_access(etmq, trace_chan_id, addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 				   sizeof(instr32), (u8 *)&instr32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 		if ((instr32 & 0x0F000000) == 0x0F000000 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 		    (instr32 & 0xF0000000) != 0xF0000000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 	case CS_ETM_ISA_A64:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 		 * The SVC of A64 is defined in ARM DDI 0487D.a, C6.2.294:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 		 *  b'31               b'21           b'4     b'0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 		 * +-----------------------+---------+-----------+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 		 * | 1 1 0 1 0 1 0 0 0 0 0 |  imm16  | 0 0 0 0 1 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 		 * +-----------------------+---------+-----------+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 		addr = end_addr - 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 		cs_etm__mem_access(etmq, trace_chan_id, addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 				   sizeof(instr32), (u8 *)&instr32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 		if ((instr32 & 0xFFE0001F) == 0xd4000001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 	case CS_ETM_ISA_UNKNOWN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) static bool cs_etm__is_syscall(struct cs_etm_queue *etmq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 			       struct cs_etm_traceid_queue *tidq, u64 magic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 	u8 trace_chan_id = tidq->trace_chan_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 	struct cs_etm_packet *packet = tidq->packet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 	struct cs_etm_packet *prev_packet = tidq->prev_packet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 	if (magic == __perf_cs_etmv3_magic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 		if (packet->exception_number == CS_ETMV3_EXC_SVC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 	 * ETMv4 exception type CS_ETMV4_EXC_CALL covers SVC, SMC and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 	 * HVC cases; need to check if it's SVC instruction based on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 	 * packet address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 	if (magic == __perf_cs_etmv4_magic) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 		if (packet->exception_number == CS_ETMV4_EXC_CALL &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 		    cs_etm__is_svc_instr(etmq, trace_chan_id, prev_packet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 					 prev_packet->end_addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) static bool cs_etm__is_async_exception(struct cs_etm_traceid_queue *tidq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 				       u64 magic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 	struct cs_etm_packet *packet = tidq->packet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 	if (magic == __perf_cs_etmv3_magic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 		if (packet->exception_number == CS_ETMV3_EXC_DEBUG_HALT ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 		    packet->exception_number == CS_ETMV3_EXC_ASYNC_DATA_ABORT ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 		    packet->exception_number == CS_ETMV3_EXC_PE_RESET ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 		    packet->exception_number == CS_ETMV3_EXC_IRQ ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 		    packet->exception_number == CS_ETMV3_EXC_FIQ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 	if (magic == __perf_cs_etmv4_magic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 		if (packet->exception_number == CS_ETMV4_EXC_RESET ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 		    packet->exception_number == CS_ETMV4_EXC_DEBUG_HALT ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 		    packet->exception_number == CS_ETMV4_EXC_SYSTEM_ERROR ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 		    packet->exception_number == CS_ETMV4_EXC_INST_DEBUG ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 		    packet->exception_number == CS_ETMV4_EXC_DATA_DEBUG ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 		    packet->exception_number == CS_ETMV4_EXC_IRQ ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 		    packet->exception_number == CS_ETMV4_EXC_FIQ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) static bool cs_etm__is_sync_exception(struct cs_etm_queue *etmq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 				      struct cs_etm_traceid_queue *tidq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 				      u64 magic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 	u8 trace_chan_id = tidq->trace_chan_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 	struct cs_etm_packet *packet = tidq->packet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 	struct cs_etm_packet *prev_packet = tidq->prev_packet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 	if (magic == __perf_cs_etmv3_magic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 		if (packet->exception_number == CS_ETMV3_EXC_SMC ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 		    packet->exception_number == CS_ETMV3_EXC_HYP ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 		    packet->exception_number == CS_ETMV3_EXC_JAZELLE_THUMBEE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 		    packet->exception_number == CS_ETMV3_EXC_UNDEFINED_INSTR ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 		    packet->exception_number == CS_ETMV3_EXC_PREFETCH_ABORT ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 		    packet->exception_number == CS_ETMV3_EXC_DATA_FAULT ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 		    packet->exception_number == CS_ETMV3_EXC_GENERIC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 	if (magic == __perf_cs_etmv4_magic) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 		if (packet->exception_number == CS_ETMV4_EXC_TRAP ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 		    packet->exception_number == CS_ETMV4_EXC_ALIGNMENT ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 		    packet->exception_number == CS_ETMV4_EXC_INST_FAULT ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 		    packet->exception_number == CS_ETMV4_EXC_DATA_FAULT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 		 * For CS_ETMV4_EXC_CALL, except SVC other instructions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 		 * (SMC, HVC) are taken as sync exceptions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 		if (packet->exception_number == CS_ETMV4_EXC_CALL &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 		    !cs_etm__is_svc_instr(etmq, trace_chan_id, prev_packet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 					  prev_packet->end_addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 		 * ETMv4 has 5 bits for exception number; if the numbers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 		 * are in the range ( CS_ETMV4_EXC_FIQ, CS_ETMV4_EXC_END ]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 		 * they are implementation defined exceptions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 		 * For this case, simply take it as sync exception.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 		if (packet->exception_number > CS_ETMV4_EXC_FIQ &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 		    packet->exception_number <= CS_ETMV4_EXC_END)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) static int cs_etm__set_sample_flags(struct cs_etm_queue *etmq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 				    struct cs_etm_traceid_queue *tidq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 	struct cs_etm_packet *packet = tidq->packet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 	struct cs_etm_packet *prev_packet = tidq->prev_packet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 	u8 trace_chan_id = tidq->trace_chan_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 	u64 magic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 	switch (packet->sample_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 	case CS_ETM_RANGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 		 * Immediate branch instruction without neither link nor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 		 * return flag, it's normal branch instruction within
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 		 * the function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 		if (packet->last_instr_type == OCSD_INSTR_BR &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 		    packet->last_instr_subtype == OCSD_S_INSTR_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 			packet->flags = PERF_IP_FLAG_BRANCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 			if (packet->last_instr_cond)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 				packet->flags |= PERF_IP_FLAG_CONDITIONAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 		 * Immediate branch instruction with link (e.g. BL), this is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 		 * branch instruction for function call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 		if (packet->last_instr_type == OCSD_INSTR_BR &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 		    packet->last_instr_subtype == OCSD_S_INSTR_BR_LINK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 			packet->flags = PERF_IP_FLAG_BRANCH |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 					PERF_IP_FLAG_CALL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 		 * Indirect branch instruction with link (e.g. BLR), this is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 		 * branch instruction for function call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 		if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 		    packet->last_instr_subtype == OCSD_S_INSTR_BR_LINK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 			packet->flags = PERF_IP_FLAG_BRANCH |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 					PERF_IP_FLAG_CALL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 		 * Indirect branch instruction with subtype of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 		 * OCSD_S_INSTR_V7_IMPLIED_RET, this is explicit hint for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 		 * function return for A32/T32.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 		if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 		    packet->last_instr_subtype == OCSD_S_INSTR_V7_IMPLIED_RET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 			packet->flags = PERF_IP_FLAG_BRANCH |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 					PERF_IP_FLAG_RETURN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 		 * Indirect branch instruction without link (e.g. BR), usually
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 		 * this is used for function return, especially for functions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 		 * within dynamic link lib.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 		if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 		    packet->last_instr_subtype == OCSD_S_INSTR_NONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 			packet->flags = PERF_IP_FLAG_BRANCH |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 					PERF_IP_FLAG_RETURN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 		/* Return instruction for function return. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 		if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 		    packet->last_instr_subtype == OCSD_S_INSTR_V8_RET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 			packet->flags = PERF_IP_FLAG_BRANCH |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 					PERF_IP_FLAG_RETURN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 		 * Decoder might insert a discontinuity in the middle of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 		 * instruction packets, fixup prev_packet with flag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 		 * PERF_IP_FLAG_TRACE_BEGIN to indicate restarting trace.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 		if (prev_packet->sample_type == CS_ETM_DISCONTINUITY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 			prev_packet->flags |= PERF_IP_FLAG_BRANCH |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 					      PERF_IP_FLAG_TRACE_BEGIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 		 * If the previous packet is an exception return packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 		 * and the return address just follows SVC instuction,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 		 * it needs to calibrate the previous packet sample flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 		 * as PERF_IP_FLAG_SYSCALLRET.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 		if (prev_packet->flags == (PERF_IP_FLAG_BRANCH |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 					   PERF_IP_FLAG_RETURN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 					   PERF_IP_FLAG_INTERRUPT) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 		    cs_etm__is_svc_instr(etmq, trace_chan_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 					 packet, packet->start_addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 			prev_packet->flags = PERF_IP_FLAG_BRANCH |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 					     PERF_IP_FLAG_RETURN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 					     PERF_IP_FLAG_SYSCALLRET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 	case CS_ETM_DISCONTINUITY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 		 * The trace is discontinuous, if the previous packet is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 		 * instruction packet, set flag PERF_IP_FLAG_TRACE_END
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 		 * for previous packet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 		if (prev_packet->sample_type == CS_ETM_RANGE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 			prev_packet->flags |= PERF_IP_FLAG_BRANCH |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 					      PERF_IP_FLAG_TRACE_END;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 	case CS_ETM_EXCEPTION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 		ret = cs_etm__get_magic(packet->trace_chan_id, &magic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 		/* The exception is for system call. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 		if (cs_etm__is_syscall(etmq, tidq, magic))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 			packet->flags = PERF_IP_FLAG_BRANCH |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 					PERF_IP_FLAG_CALL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 					PERF_IP_FLAG_SYSCALLRET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 		 * The exceptions are triggered by external signals from bus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 		 * interrupt controller, debug module, PE reset or halt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 		else if (cs_etm__is_async_exception(tidq, magic))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 			packet->flags = PERF_IP_FLAG_BRANCH |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 					PERF_IP_FLAG_CALL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 					PERF_IP_FLAG_ASYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 					PERF_IP_FLAG_INTERRUPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 		 * Otherwise, exception is caused by trap, instruction &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 		 * data fault, or alignment errors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 		else if (cs_etm__is_sync_exception(etmq, tidq, magic))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 			packet->flags = PERF_IP_FLAG_BRANCH |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 					PERF_IP_FLAG_CALL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 					PERF_IP_FLAG_INTERRUPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 		 * When the exception packet is inserted, since exception
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 		 * packet is not used standalone for generating samples
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 		 * and it's affiliation to the previous instruction range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 		 * packet; so set previous range packet flags to tell perf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 		 * it is an exception taken branch.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 		if (prev_packet->sample_type == CS_ETM_RANGE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 			prev_packet->flags = packet->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 	case CS_ETM_EXCEPTION_RET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 		 * When the exception return packet is inserted, since
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 		 * exception return packet is not used standalone for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 		 * generating samples and it's affiliation to the previous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 		 * instruction range packet; so set previous range packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 		 * flags to tell perf it is an exception return branch.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 		 * The exception return can be for either system call or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 		 * other exception types; unfortunately the packet doesn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 		 * contain exception type related info so we cannot decide
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 		 * the exception type purely based on exception return packet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 		 * If we record the exception number from exception packet and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 		 * reuse it for excpetion return packet, this is not reliable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 		 * due the trace can be discontinuity or the interrupt can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 		 * be nested, thus the recorded exception number cannot be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 		 * used for exception return packet for these two cases.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 		 * For exception return packet, we only need to distinguish the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 		 * packet is for system call or for other types.  Thus the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 		 * decision can be deferred when receive the next packet which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 		 * contains the return address, based on the return address we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 		 * can read out the previous instruction and check if it's a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 		 * system call instruction and then calibrate the sample flag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 		 * as needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 		if (prev_packet->sample_type == CS_ETM_RANGE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 			prev_packet->flags = PERF_IP_FLAG_BRANCH |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 					     PERF_IP_FLAG_RETURN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 					     PERF_IP_FLAG_INTERRUPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 	case CS_ETM_EMPTY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) static int cs_etm__decode_data_block(struct cs_etm_queue *etmq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 	size_t processed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 	 * Packets are decoded and added to the decoder's packet queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 	 * until the decoder packet processing callback has requested that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 	 * processing stops or there is nothing left in the buffer.  Normal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 	 * operations that stop processing are a timestamp packet or a full
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 	 * decoder buffer queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 	ret = cs_etm_decoder__process_data_block(etmq->decoder,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 						 etmq->offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 						 &etmq->buf[etmq->buf_used],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 						 etmq->buf_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 						 &processed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 	etmq->offset += processed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 	etmq->buf_used += processed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 	etmq->buf_len -= processed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) static int cs_etm__process_traceid_queue(struct cs_etm_queue *etmq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 					 struct cs_etm_traceid_queue *tidq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 	struct cs_etm_packet_queue *packet_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 	packet_queue = &tidq->packet_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 	/* Process each packet in this chunk */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 	while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 		ret = cs_etm_decoder__get_packet(packet_queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 						 tidq->packet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 		if (ret <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 			 * Stop processing this chunk on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 			 * end of data or error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 		 * Since packet addresses are swapped in packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 		 * handling within below switch() statements,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 		 * thus setting sample flags must be called
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 		 * prior to switch() statement to use address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 		 * information before packets swapping.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 		ret = cs_etm__set_sample_flags(etmq, tidq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 		switch (tidq->packet->sample_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 		case CS_ETM_RANGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 			 * If the packet contains an instruction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 			 * range, generate instruction sequence
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 			 * events.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 			cs_etm__sample(etmq, tidq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 		case CS_ETM_EXCEPTION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 		case CS_ETM_EXCEPTION_RET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 			 * If the exception packet is coming,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 			 * make sure the previous instruction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 			 * range packet to be handled properly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 			cs_etm__exception(tidq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 		case CS_ETM_DISCONTINUITY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 			 * Discontinuity in trace, flush
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 			 * previous branch stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 			cs_etm__flush(etmq, tidq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 		case CS_ETM_EMPTY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 			 * Should not receive empty packet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 			 * report error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 			pr_err("CS ETM Trace: empty packet\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) static void cs_etm__clear_all_traceid_queues(struct cs_etm_queue *etmq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 	int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 	struct int_node *inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 	struct cs_etm_traceid_queue *tidq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 	struct intlist *traceid_queues_list = etmq->traceid_queues_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 	intlist__for_each_entry(inode, traceid_queues_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 		idx = (int)(intptr_t)inode->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 		tidq = etmq->traceid_queues[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 		/* Ignore return value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 		cs_etm__process_traceid_queue(etmq, tidq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 		 * Generate an instruction sample with the remaining
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 		 * branchstack entries.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 		cs_etm__flush(etmq, tidq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) static int cs_etm__run_decoder(struct cs_etm_queue *etmq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 	struct cs_etm_traceid_queue *tidq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 	tidq = cs_etm__etmq_get_traceid_queue(etmq, CS_ETM_PER_THREAD_TRACEID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 	if (!tidq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 	/* Go through each buffer in the queue and decode them one by one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 	while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 		err = cs_etm__get_data_block(etmq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 		if (err <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 		/* Run trace decoder until buffer consumed or end of trace */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 		do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 			err = cs_etm__decode_data_block(etmq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 			if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 				return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 			 * Process each packet in this chunk, nothing to do if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 			 * an error occurs other than hoping the next one will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 			 * be better.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 			err = cs_etm__process_traceid_queue(etmq, tidq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 		} while (etmq->buf_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 		if (err == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 			/* Flush any remaining branch stack entries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 			err = cs_etm__end_block(etmq, tidq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 					   pid_t tid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) 	struct auxtrace_queues *queues = &etm->queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 	for (i = 0; i < queues->nr_queues; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) 		struct auxtrace_queue *queue = &etm->queues.queue_array[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) 		struct cs_etm_queue *etmq = queue->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 		struct cs_etm_traceid_queue *tidq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 		if (!etmq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 		tidq = cs_etm__etmq_get_traceid_queue(etmq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 						CS_ETM_PER_THREAD_TRACEID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 		if (!tidq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 		if ((tid == -1) || (tidq->tid == tid)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 			cs_etm__set_pid_tid_cpu(etm, tidq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 			cs_etm__run_decoder(etmq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) static int cs_etm__process_queues(struct cs_etm_auxtrace *etm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 	unsigned int cs_queue_nr, queue_nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 	u8 trace_chan_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 	u64 timestamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 	struct auxtrace_queue *queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 	struct cs_etm_queue *etmq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 	struct cs_etm_traceid_queue *tidq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 	while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 		if (!etm->heap.heap_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 		/* Take the entry at the top of the min heap */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 		cs_queue_nr = etm->heap.heap_array[0].queue_nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 		queue_nr = TO_QUEUE_NR(cs_queue_nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 		trace_chan_id = TO_TRACE_CHAN_ID(cs_queue_nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 		queue = &etm->queues.queue_array[queue_nr];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 		etmq = queue->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 		 * Remove the top entry from the heap since we are about
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 		 * to process it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 		auxtrace_heap__pop(&etm->heap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 		tidq  = cs_etm__etmq_get_traceid_queue(etmq, trace_chan_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 		if (!tidq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 			 * No traceID queue has been allocated for this traceID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 			 * which means something somewhere went very wrong.  No
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 			 * other choice than simply exit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 			ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 		 * Packets associated with this timestamp are already in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 		 * the etmq's traceID queue, so process them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 		ret = cs_etm__process_traceid_queue(etmq, tidq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 		 * Packets for this timestamp have been processed, time to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) 		 * move on to the next timestamp, fetching a new auxtrace_buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 		 * if need be.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) refetch:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 		ret = cs_etm__get_data_block(etmq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 		 * No more auxtrace_buffers to process in this etmq, simply
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 		 * move on to another entry in the auxtrace_heap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 		if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 		ret = cs_etm__decode_data_block(etmq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 		timestamp = cs_etm__etmq_get_timestamp(etmq, &trace_chan_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 		if (!timestamp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 			 * Function cs_etm__decode_data_block() returns when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 			 * there is no more traces to decode in the current
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 			 * auxtrace_buffer OR when a timestamp has been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 			 * encountered on any of the traceID queues.  Since we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 			 * did not get a timestamp, there is no more traces to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 			 * process in this auxtrace_buffer.  As such empty and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 			 * flush all traceID queues.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) 			cs_etm__clear_all_traceid_queues(etmq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) 			/* Fetch another auxtrace_buffer for this etmq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 			goto refetch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 		 * Add to the min heap the timestamp for packets that have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 		 * just been decoded.  They will be processed and synthesized
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) 		 * during the next call to cs_etm__process_traceid_queue() for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) 		 * this queue/traceID.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 		cs_queue_nr = TO_CS_QUEUE_NR(queue_nr, trace_chan_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) 		ret = auxtrace_heap__add(&etm->heap, cs_queue_nr, timestamp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) static int cs_etm__process_itrace_start(struct cs_etm_auxtrace *etm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 					union perf_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) 	struct thread *th;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 	if (etm->timeless_decoding)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 	 * Add the tid/pid to the log so that we can get a match when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 	 * we get a contextID from the decoder.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 	th = machine__findnew_thread(etm->machine,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 				     event->itrace_start.pid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 				     event->itrace_start.tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 	if (!th)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 	thread__put(th);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) static int cs_etm__process_switch_cpu_wide(struct cs_etm_auxtrace *etm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 					   union perf_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) 	struct thread *th;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 	bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) 	 * Context switch in per-thread mode are irrelevant since perf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) 	 * will start/stop tracing as the process is scheduled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) 	if (etm->timeless_decoding)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 	 * SWITCH_IN events carry the next process to be switched out while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) 	 * SWITCH_OUT events carry the process to be switched in.  As such
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 	 * we don't care about IN events.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) 	if (!out)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) 	 * Add the tid/pid to the log so that we can get a match when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) 	 * we get a contextID from the decoder.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) 	th = machine__findnew_thread(etm->machine,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) 				     event->context_switch.next_prev_pid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) 				     event->context_switch.next_prev_tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) 	if (!th)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 	thread__put(th);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) static int cs_etm__process_event(struct perf_session *session,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) 				 union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 				 struct perf_sample *sample,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 				 struct perf_tool *tool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) 	u64 timestamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) 	struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 						   struct cs_etm_auxtrace,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) 						   auxtrace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) 	if (dump_trace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) 	if (!tool->ordered_events) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) 		pr_err("CoreSight ETM Trace requires ordered events\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) 	if (sample->time && (sample->time != (u64) -1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) 		timestamp = sample->time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 		timestamp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) 	if (timestamp || etm->timeless_decoding) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 		err = cs_etm__update_queues(etm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) 	if (etm->timeless_decoding &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) 	    event->header.type == PERF_RECORD_EXIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) 		return cs_etm__process_timeless_queues(etm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) 						       event->fork.tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) 	if (event->header.type == PERF_RECORD_ITRACE_START)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) 		return cs_etm__process_itrace_start(etm, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) 	else if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) 		return cs_etm__process_switch_cpu_wide(etm, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) 	if (!etm->timeless_decoding &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) 	    event->header.type == PERF_RECORD_AUX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) 		return cs_etm__process_queues(etm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) static int cs_etm__process_auxtrace_event(struct perf_session *session,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) 					  union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) 					  struct perf_tool *tool __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) 	struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) 						   struct cs_etm_auxtrace,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) 						   auxtrace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) 	if (!etm->data_queued) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) 		struct auxtrace_buffer *buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) 		off_t  data_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) 		int fd = perf_data__fd(session->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) 		bool is_pipe = perf_data__is_pipe(session->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) 		int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) 		if (is_pipe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) 			data_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) 		else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) 			data_offset = lseek(fd, 0, SEEK_CUR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) 			if (data_offset == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) 				return -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) 		err = auxtrace_queues__add_event(&etm->queues, session,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) 						 event, data_offset, &buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) 		if (dump_trace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) 			if (auxtrace_buffer__get_data(buffer, fd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) 				cs_etm__dump_event(etm, buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) 				auxtrace_buffer__put_data(buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) static bool cs_etm__is_timeless_decoding(struct cs_etm_auxtrace *etm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) 	struct evsel *evsel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) 	struct evlist *evlist = etm->session->evlist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) 	bool timeless_decoding = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) 	 * Circle through the list of event and complain if we find one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) 	 * with the time bit set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) 	evlist__for_each_entry(evlist, evsel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) 		if ((evsel->core.attr.sample_type & PERF_SAMPLE_TIME))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) 			timeless_decoding = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) 	return timeless_decoding;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) static const char * const cs_etm_global_header_fmts[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) 	[CS_HEADER_VERSION_0]	= "	Header version		       %llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) 	[CS_PMU_TYPE_CPUS]	= "	PMU type/num cpus	       %llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) 	[CS_ETM_SNAPSHOT]	= "	Snapshot		       %llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) static const char * const cs_etm_priv_fmts[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) 	[CS_ETM_MAGIC]		= "	Magic number		       %llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 	[CS_ETM_CPU]		= "	CPU			       %lld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) 	[CS_ETM_ETMCR]		= "	ETMCR			       %llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) 	[CS_ETM_ETMTRACEIDR]	= "	ETMTRACEIDR		       %llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 	[CS_ETM_ETMCCER]	= "	ETMCCER			       %llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) 	[CS_ETM_ETMIDR]		= "	ETMIDR			       %llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) static const char * const cs_etmv4_priv_fmts[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) 	[CS_ETM_MAGIC]		= "	Magic number		       %llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) 	[CS_ETM_CPU]		= "	CPU			       %lld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) 	[CS_ETMV4_TRCCONFIGR]	= "	TRCCONFIGR		       %llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) 	[CS_ETMV4_TRCTRACEIDR]	= "	TRCTRACEIDR		       %llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) 	[CS_ETMV4_TRCIDR0]	= "	TRCIDR0			       %llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) 	[CS_ETMV4_TRCIDR1]	= "	TRCIDR1			       %llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) 	[CS_ETMV4_TRCIDR2]	= "	TRCIDR2			       %llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) 	[CS_ETMV4_TRCIDR8]	= "	TRCIDR8			       %llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) 	[CS_ETMV4_TRCAUTHSTATUS] = "	TRCAUTHSTATUS		       %llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) static void cs_etm__print_auxtrace_info(__u64 *val, int num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) 	int i, j, cpu = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) 	for (i = 0; i < CS_HEADER_VERSION_0_MAX; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) 		fprintf(stdout, cs_etm_global_header_fmts[i], val[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) 	for (i = CS_HEADER_VERSION_0_MAX; cpu < num; cpu++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) 		if (val[i] == __perf_cs_etmv3_magic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) 			for (j = 0; j < CS_ETM_PRIV_MAX; j++, i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) 				fprintf(stdout, cs_etm_priv_fmts[j], val[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) 		else if (val[i] == __perf_cs_etmv4_magic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) 			for (j = 0; j < CS_ETMV4_PRIV_MAX; j++, i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) 				fprintf(stdout, cs_etmv4_priv_fmts[j], val[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) 			/* failure.. return */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) int cs_etm__process_auxtrace_info(union perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) 				  struct perf_session *session)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) 	struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) 	struct cs_etm_auxtrace *etm = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) 	struct int_node *inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) 	unsigned int pmu_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) 	int event_header_size = sizeof(struct perf_event_header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) 	int info_header_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) 	int total_size = auxtrace_info->header.size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) 	int priv_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) 	int num_cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) 	int err = 0, idx = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) 	int i, j, k;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) 	u64 *ptr, *hdr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) 	u64 **metadata = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) 	 * sizeof(auxtrace_info_event::type) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) 	 * sizeof(auxtrace_info_event::reserved) == 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) 	info_header_size = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) 	if (total_size < (event_header_size + info_header_size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) 	priv_size = total_size - event_header_size - info_header_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) 	/* First the global part */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) 	ptr = (u64 *) auxtrace_info->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) 	/* Look for version '0' of the header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) 	if (ptr[0] != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) 	hdr = zalloc(sizeof(*hdr) * CS_HEADER_VERSION_0_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) 	if (!hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) 	/* Extract header information - see cs-etm.h for format */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) 	for (i = 0; i < CS_HEADER_VERSION_0_MAX; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) 		hdr[i] = ptr[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) 	num_cpu = hdr[CS_PMU_TYPE_CPUS] & 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) 	pmu_type = (unsigned int) ((hdr[CS_PMU_TYPE_CPUS] >> 32) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) 				    0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) 	 * Create an RB tree for traceID-metadata tuple.  Since the conversion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) 	 * has to be made for each packet that gets decoded, optimizing access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) 	 * in anything other than a sequential array is worth doing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) 	traceid_list = intlist__new(NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) 	if (!traceid_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) 		err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) 		goto err_free_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) 	metadata = zalloc(sizeof(*metadata) * num_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) 	if (!metadata) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) 		err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) 		goto err_free_traceid_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) 	 * The metadata is stored in the auxtrace_info section and encodes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) 	 * the configuration of the ARM embedded trace macrocell which is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) 	 * required by the trace decoder to properly decode the trace due
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) 	 * to its highly compressed nature.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) 	for (j = 0; j < num_cpu; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) 		if (ptr[i] == __perf_cs_etmv3_magic) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) 			metadata[j] = zalloc(sizeof(*metadata[j]) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) 					     CS_ETM_PRIV_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) 			if (!metadata[j]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) 				err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) 				goto err_free_metadata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) 			for (k = 0; k < CS_ETM_PRIV_MAX; k++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) 				metadata[j][k] = ptr[i + k];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) 			/* The traceID is our handle */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) 			idx = metadata[j][CS_ETM_ETMTRACEIDR];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) 			i += CS_ETM_PRIV_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) 		} else if (ptr[i] == __perf_cs_etmv4_magic) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) 			metadata[j] = zalloc(sizeof(*metadata[j]) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) 					     CS_ETMV4_PRIV_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) 			if (!metadata[j]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) 				err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) 				goto err_free_metadata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) 			for (k = 0; k < CS_ETMV4_PRIV_MAX; k++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) 				metadata[j][k] = ptr[i + k];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) 			/* The traceID is our handle */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) 			idx = metadata[j][CS_ETMV4_TRCTRACEIDR];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) 			i += CS_ETMV4_PRIV_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) 		/* Get an RB node for this CPU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) 		inode = intlist__findnew(traceid_list, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) 		/* Something went wrong, no need to continue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) 		if (!inode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) 			err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) 			goto err_free_metadata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) 		 * The node for that CPU should not be taken.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) 		 * Back out if that's the case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) 		if (inode->priv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) 			err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) 			goto err_free_metadata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) 		/* All good, associate the traceID with the metadata pointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) 		inode->priv = metadata[j];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) 	 * Each of CS_HEADER_VERSION_0_MAX, CS_ETM_PRIV_MAX and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) 	 * CS_ETMV4_PRIV_MAX mark how many double words are in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) 	 * global metadata, and each cpu's metadata respectively.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) 	 * The following tests if the correct number of double words was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) 	 * present in the auxtrace info section.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) 	if (i * 8 != priv_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) 		err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) 		goto err_free_metadata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) 	etm = zalloc(sizeof(*etm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) 	if (!etm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) 		err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) 		goto err_free_metadata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) 	err = auxtrace_queues__init(&etm->queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) 		goto err_free_etm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) 	etm->session = session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) 	etm->machine = &session->machines.host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) 	etm->num_cpu = num_cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) 	etm->pmu_type = pmu_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) 	etm->snapshot_mode = (hdr[CS_ETM_SNAPSHOT] != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) 	etm->metadata = metadata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) 	etm->auxtrace_type = auxtrace_info->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) 	etm->timeless_decoding = cs_etm__is_timeless_decoding(etm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) 	etm->auxtrace.process_event = cs_etm__process_event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) 	etm->auxtrace.process_auxtrace_event = cs_etm__process_auxtrace_event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) 	etm->auxtrace.flush_events = cs_etm__flush_events;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) 	etm->auxtrace.free_events = cs_etm__free_events;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) 	etm->auxtrace.free = cs_etm__free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) 	etm->auxtrace.evsel_is_auxtrace = cs_etm__evsel_is_auxtrace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) 	session->auxtrace = &etm->auxtrace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) 	etm->unknown_thread = thread__new(999999999, 999999999);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) 	if (!etm->unknown_thread) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) 		err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) 		goto err_free_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) 	 * Initialize list node so that at thread__zput() we can avoid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) 	 * segmentation fault at list_del_init().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) 	INIT_LIST_HEAD(&etm->unknown_thread->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) 	err = thread__set_comm(etm->unknown_thread, "unknown", 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) 		goto err_delete_thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) 	if (thread__init_maps(etm->unknown_thread, etm->machine)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) 		err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) 		goto err_delete_thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) 	if (dump_trace) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) 		cs_etm__print_auxtrace_info(auxtrace_info->priv, num_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) 	if (session->itrace_synth_opts->set) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) 		etm->synth_opts = *session->itrace_synth_opts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) 		itrace_synth_opts__set_default(&etm->synth_opts,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) 				session->itrace_synth_opts->default_no_sample);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) 		etm->synth_opts.callchain = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) 	err = cs_etm__synth_events(etm, session);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) 		goto err_delete_thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) 	err = auxtrace_queues__process_index(&etm->queues, session);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) 		goto err_delete_thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) 	etm->data_queued = etm->queues.populated;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) err_delete_thread:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) 	thread__zput(etm->unknown_thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) err_free_queues:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) 	auxtrace_queues__free(&etm->queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) 	session->auxtrace = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) err_free_etm:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) 	zfree(&etm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) err_free_metadata:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) 	/* No need to check @metadata[j], free(NULL) is supported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) 	for (j = 0; j < num_cpu; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) 		zfree(&metadata[j]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) 	zfree(&metadata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) err_free_traceid_list:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) 	intlist__delete(traceid_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) err_free_hdr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) 	zfree(&hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) }