/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * auxtrace.h: AUX area trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 */

#ifndef __PERF_AUXTRACE_H
#define __PERF_AUXTRACE_H

#include <sys/types.h>
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h> // FILE
#include <linux/list.h>
#include <linux/perf_event.h>
#include <linux/types.h>
#include <asm/bitsperlong.h>
#include <asm/barrier.h>

union perf_event;
struct perf_session;
struct evlist;
struct evsel;
struct perf_tool;
struct mmap;
struct perf_sample;
struct option;
struct record_opts;
struct perf_record_auxtrace_error;
struct perf_record_auxtrace_info;
struct events_stats;
struct perf_pmu;

enum auxtrace_error_type {
	PERF_AUXTRACE_ERROR_ITRACE = 1,
	PERF_AUXTRACE_ERROR_MAX
};

/* Auxtrace records must have the same alignment as perf event records */
#define PERF_AUXTRACE_RECORD_ALIGNMENT 8

enum auxtrace_type {
	PERF_AUXTRACE_UNKNOWN,
	PERF_AUXTRACE_INTEL_PT,
	PERF_AUXTRACE_INTEL_BTS,
	PERF_AUXTRACE_CS_ETM,
	PERF_AUXTRACE_ARM_SPE,
	PERF_AUXTRACE_S390_CPUMSF,
};

enum itrace_period_type {
	PERF_ITRACE_PERIOD_INSTRUCTIONS,
	PERF_ITRACE_PERIOD_TICKS,
	PERF_ITRACE_PERIOD_NANOSECS,
};

#define AUXTRACE_ERR_FLG_OVERFLOW	(1 << ('o' - 'a'))
#define AUXTRACE_ERR_FLG_DATA_LOST	(1 << ('l' - 'a'))

#define AUXTRACE_LOG_FLG_ALL_PERF_EVTS	(1 << ('a' - 'a'))

/**
 * struct itrace_synth_opts - AUX area tracing synthesis options.
 * @set: indicates whether or not options have been set
 * @default_no_sample: Default to no sampling.
 * @inject: indicates the event (not just the sample) must be fully synthesized
 *          because 'perf inject' will write it out
 * @instructions: whether to synthesize 'instructions' events
 * @branches: whether to synthesize 'branches' events
 *            (branch misses only for Arm SPE)
 * @transactions: whether to synthesize events for transactions
 * @ptwrites: whether to synthesize events for ptwrites
 * @pwr_events: whether to synthesize power events
 * @other_events: whether to synthesize other events recorded due to the use of
 *                aux_output
 * @errors: whether to synthesize decoder error events
 * @dont_decode: whether to skip decoding entirely
 * @log: write a decoding log
 * @calls: limit branch samples to calls (can be combined with @returns)
 * @returns: limit branch samples to returns (can be combined with @calls)
 * @callchain: add callchain to 'instructions' events
 * @add_callchain: add callchain to existing event records
 * @thread_stack: feed branches to the thread_stack
 * @last_branch: add branch context to 'instruction' events
 * @add_last_branch: add branch context to existing event records
 * @flc: whether to synthesize first level cache events
 * @llc: whether to synthesize last level cache events
 * @tlb: whether to synthesize TLB events
 * @remote_access: whether to synthesize remote access events
 * @callchain_sz: maximum callchain size
 * @last_branch_sz: branch context size
 * @period: 'instructions' events period
 * @period_type: 'instructions' events period type
 * @initial_skip: skip N events at the beginning.
 * @cpu_bitmap: CPUs for which to synthesize events, or NULL for all
 * @ptime_range: time intervals to trace or NULL
 * @range_num: number of time intervals to trace
 * @error_plus_flags: flags to affect what errors are reported
 * @error_minus_flags: flags to affect what errors are reported
 * @log_plus_flags: flags to affect what is logged
 * @log_minus_flags: flags to affect what is logged
 * @quick: quicker (less detailed) decoding
 */
struct itrace_synth_opts {
	bool set;
	bool default_no_sample;
	bool inject;
	bool instructions;
	bool branches;
	bool transactions;
	bool ptwrites;
	bool pwr_events;
	bool other_events;
	bool errors;
	bool dont_decode;
	bool log;
	bool calls;
	bool returns;
	bool callchain;
	bool add_callchain;
	bool thread_stack;
	bool last_branch;
	bool add_last_branch;
	bool flc;
	bool llc;
	bool tlb;
	bool remote_access;
	unsigned int callchain_sz;
	unsigned int last_branch_sz;
	unsigned long long period;
	enum itrace_period_type period_type;
	unsigned long initial_skip;
	unsigned long *cpu_bitmap;
	struct perf_time_interval *ptime_range;
	int range_num;
	unsigned int error_plus_flags;
	unsigned int error_minus_flags;
	unsigned int log_plus_flags;
	unsigned int log_minus_flags;
	unsigned int quick;
};
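
/*
 * Illustrative sketch (not part of the API): the fields above are normally
 * filled in from a --itrace option string (see ITRACE_HELP below).  For
 * example, an option string such as "i100ns" would be expected to end up
 * roughly as:
 *
 *	struct itrace_synth_opts opts = {
 *		.set		= true,
 *		.instructions	= true,
 *		.period		= 100,
 *		.period_type	= PERF_ITRACE_PERIOD_NANOSECS,
 *	};
 *
 * itrace_synth_opts__set_default(), declared below, fills in the defaults
 * when no option string is given.
 */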

/**
 * struct auxtrace_index_entry - indexes an AUX area tracing event within a
 *                               perf.data file.
 * @file_offset: offset within the perf.data file
 * @sz: size of the event
 */
struct auxtrace_index_entry {
	u64 file_offset;
	u64 sz;
};

#define PERF_AUXTRACE_INDEX_ENTRY_COUNT 256

/**
 * struct auxtrace_index - index of AUX area tracing events within a perf.data
 *                         file.
 * @list: linking a number of arrays of entries
 * @nr: number of entries
 * @entries: array of entries
 */
struct auxtrace_index {
	struct list_head list;
	size_t nr;
	struct auxtrace_index_entry entries[PERF_AUXTRACE_INDEX_ENTRY_COUNT];
};

/**
 * struct auxtrace - session callbacks to allow AUX area data decoding.
 * @process_event: lets the decoder see all session events
 * @process_auxtrace_event: process a PERF_RECORD_AUXTRACE event
 * @queue_data: queue an AUX sample or PERF_RECORD_AUXTRACE event for later
 *              processing
 * @dump_auxtrace_sample: dump AUX area sample data
 * @flush_events: process any remaining data
 * @free_events: free resources associated with event processing
 * @free: free resources associated with the session
 * @evsel_is_auxtrace: return whether an evsel is an AUX area event
 */
struct auxtrace {
	int (*process_event)(struct perf_session *session,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct perf_tool *tool);
	int (*process_auxtrace_event)(struct perf_session *session,
				      union perf_event *event,
				      struct perf_tool *tool);
	int (*queue_data)(struct perf_session *session,
			  struct perf_sample *sample, union perf_event *event,
			  u64 data_offset);
	void (*dump_auxtrace_sample)(struct perf_session *session,
				     struct perf_sample *sample);
	int (*flush_events)(struct perf_session *session,
			    struct perf_tool *tool);
	void (*free_events)(struct perf_session *session);
	void (*free)(struct perf_session *session);
	bool (*evsel_is_auxtrace)(struct perf_session *session,
				  struct evsel *evsel);
};
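
/*
 * Usage sketch (illustrative only; the my_decoder names are hypothetical):
 * a decoder typically embeds struct auxtrace in its own session state and
 * points the callbacks at its handlers, which are then invoked while the
 * session is processed:
 *
 *	struct my_decoder {
 *		struct auxtrace auxtrace;
 *		// decoder-specific state follows
 *	};
 *
 *	decoder->auxtrace.process_event		 = my_decoder__process_event;
 *	decoder->auxtrace.process_auxtrace_event = my_decoder__process_auxtrace_event;
 *	decoder->auxtrace.flush_events		 = my_decoder__flush_events;
 *	decoder->auxtrace.free_events		 = my_decoder__free_events;
 *	decoder->auxtrace.free			 = my_decoder__free;
 */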

/**
 * struct auxtrace_buffer - a buffer containing AUX area tracing data.
 * @list: buffers are queued in a list held by struct auxtrace_queue
 * @size: size of the buffer in bytes
 * @pid: in per-thread mode, the pid this buffer is associated with
 * @tid: in per-thread mode, the tid this buffer is associated with
 * @cpu: in per-cpu mode, the cpu this buffer is associated with
 * @data: actual buffer data (can be null if the data has not been loaded)
 * @data_offset: file offset at which the buffer can be read
 * @mmap_addr: mmap address at which the buffer can be read
 * @mmap_size: size of the mmap at @mmap_addr
 * @data_needs_freeing: @data was malloc'd so free it when it is no longer
 *                      needed
 * @consecutive: the original data was split up and this buffer is consecutive
 *               to the previous buffer
 * @offset: offset as determined by aux_head / aux_tail members of struct
 *          perf_event_mmap_page
 * @reference: an implementation-specific reference determined when the data is
 *             recorded
 * @buffer_nr: used to number each buffer
 * @use_size: implementation actually only uses this number of bytes
 * @use_data: implementation actually only uses data starting at this address
 */
struct auxtrace_buffer {
	struct list_head list;
	size_t size;
	pid_t pid;
	pid_t tid;
	int cpu;
	void *data;
	off_t data_offset;
	void *mmap_addr;
	size_t mmap_size;
	bool data_needs_freeing;
	bool consecutive;
	u64 offset;
	u64 reference;
	u64 buffer_nr;
	size_t use_size;
	void *use_data;
};

/**
 * struct auxtrace_queue - a queue of AUX area tracing data buffers.
 * @head: head of buffer list
 * @tid: in per-thread mode, the tid this queue is associated with
 * @cpu: in per-cpu mode, the cpu this queue is associated with
 * @set: %true once this queue has been dedicated to a specific thread or cpu
 * @priv: implementation-specific data
 */
struct auxtrace_queue {
	struct list_head head;
	pid_t tid;
	int cpu;
	bool set;
	void *priv;
};

/**
 * struct auxtrace_queues - an array of AUX area tracing queues.
 * @queue_array: array of queues
 * @nr_queues: number of queues
 * @new_data: set whenever new data is queued
 * @populated: queues have been fully populated using the auxtrace_index
 * @next_buffer_nr: used to number each buffer
 */
struct auxtrace_queues {
	struct auxtrace_queue *queue_array;
	unsigned int nr_queues;
	bool new_data;
	bool populated;
	u64 next_buffer_nr;
};

/**
 * struct auxtrace_heap_item - element of struct auxtrace_heap.
 * @queue_nr: queue number
 * @ordinal: value used for sorting (lowest ordinal is top of the heap) expected
 *           to be a timestamp
 */
struct auxtrace_heap_item {
	unsigned int queue_nr;
	u64 ordinal;
};

/**
 * struct auxtrace_heap - a heap suitable for sorting AUX area tracing queues.
 * @heap_array: the heap
 * @heap_cnt: the number of elements in the heap
 * @heap_sz: maximum number of elements (grows as needed)
 */
struct auxtrace_heap {
	struct auxtrace_heap_item *heap_array;
	unsigned int heap_cnt;
	unsigned int heap_sz;
};
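
/*
 * Illustrative sketch (variable names are hypothetical): the heap keeps the
 * queue with the lowest ordinal (usually a timestamp) on top, so queues can
 * be serviced in time order:
 *
 *	int ret = auxtrace_heap__add(&heap, queue_nr, timestamp);
 *
 *	if (ret)
 *		return ret;
 *
 *	while (heap.heap_cnt) {
 *		unsigned int queue_nr = heap.heap_array[0].queue_nr;
 *
 *		auxtrace_heap__pop(&heap);
 *		// process this queue up to some timestamp, then re-add it
 *		// with the timestamp of its next pending data
 *	}
 *
 * auxtrace_heap__add() and auxtrace_heap__pop() are declared further down.
 */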

/**
 * struct auxtrace_mmap - records an mmap of the auxtrace buffer.
 * @base: address of mapped area
 * @userpg: pointer to buffer's perf_event_mmap_page
 * @mask: %0 if @len is not a power of two, otherwise (@len - %1)
 * @len: size of mapped area
 * @prev: previous aux_head
 * @idx: index of this mmap
 * @tid: tid for a per-thread mmap (also set if there is only 1 tid on a per-cpu
 *       mmap) otherwise %0
 * @cpu: cpu number for a per-cpu mmap otherwise %-1
 */
struct auxtrace_mmap {
	void *base;
	void *userpg;
	size_t mask;
	size_t len;
	u64 prev;
	int idx;
	pid_t tid;
	int cpu;
};

/**
 * struct auxtrace_mmap_params - parameters to set up struct auxtrace_mmap.
 * @mask: %0 if @len is not a power of two, otherwise (@len - %1)
 * @offset: file offset of mapped area
 * @len: size of mapped area
 * @prot: mmap memory protection
 * @idx: index of this mmap
 * @tid: tid for a per-thread mmap (also set if there is only 1 tid on a per-cpu
 *       mmap) otherwise %0
 * @cpu: cpu number for a per-cpu mmap otherwise %-1
 */
struct auxtrace_mmap_params {
	size_t mask;
	off_t offset;
	size_t len;
	int prot;
	int idx;
	pid_t tid;
	int cpu;
};

/**
 * struct auxtrace_record - callbacks for recording AUX area data.
 * @recording_options: validate and process recording options
 * @info_priv_size: return the size of the private data in auxtrace_info_event
 * @info_fill: fill-in the private data in auxtrace_info_event
 * @free: free this auxtrace record structure
 * @snapshot_start: starting a snapshot
 * @snapshot_finish: finishing a snapshot
 * @find_snapshot: find data to snapshot within auxtrace mmap
 * @parse_snapshot_options: parse snapshot options
 * @reference: provide a 64-bit reference number for auxtrace_event
 * @read_finish: called after reading from an auxtrace mmap
 * @alignment: alignment (if any) for AUX area data
 * @default_aux_sample_size: default sample size for the --aux-sample option
 * @pmu: associated pmu
 * @evlist: selected events list
 */
struct auxtrace_record {
	int (*recording_options)(struct auxtrace_record *itr,
				 struct evlist *evlist,
				 struct record_opts *opts);
	size_t (*info_priv_size)(struct auxtrace_record *itr,
				 struct evlist *evlist);
	int (*info_fill)(struct auxtrace_record *itr,
			 struct perf_session *session,
			 struct perf_record_auxtrace_info *auxtrace_info,
			 size_t priv_size);
	void (*free)(struct auxtrace_record *itr);
	int (*snapshot_start)(struct auxtrace_record *itr);
	int (*snapshot_finish)(struct auxtrace_record *itr);
	int (*find_snapshot)(struct auxtrace_record *itr, int idx,
			     struct auxtrace_mmap *mm, unsigned char *data,
			     u64 *head, u64 *old);
	int (*parse_snapshot_options)(struct auxtrace_record *itr,
				      struct record_opts *opts,
				      const char *str);
	u64 (*reference)(struct auxtrace_record *itr);
	int (*read_finish)(struct auxtrace_record *itr, int idx);
	unsigned int alignment;
	unsigned int default_aux_sample_size;
	struct perf_pmu *pmu;
	struct evlist *evlist;
};

/**
 * struct addr_filter - address filter.
 * @list: list node
 * @range: true if it is a range filter
 * @start: true if action is 'filter' or 'start'
 * @action: 'filter', 'start' or 'stop' ('tracestop' is accepted but converted
 *          to 'stop')
 * @sym_from: symbol name for the filter address
 * @sym_to: symbol name that determines the filter size
 * @sym_from_idx: selects n'th from symbols with the same name (0 means global
 *                and less than 0 means symbol must be unique)
 * @sym_to_idx: same as @sym_from_idx but for @sym_to
 * @addr: filter address
 * @size: filter region size (for range filters)
 * @filename: DSO file name or NULL for the kernel
 * @str: allocated string that contains the other string members
 */
struct addr_filter {
	struct list_head list;
	bool range;
	bool start;
	const char *action;
	const char *sym_from;
	const char *sym_to;
	int sym_from_idx;
	int sym_to_idx;
	u64 addr;
	u64 size;
	const char *filename;
	char *str;
};

/**
 * struct addr_filters - list of address filters.
 * @head: list of address filters
 * @cnt: number of address filters
 */
struct addr_filters {
	struct list_head head;
	int cnt;
};
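
/*
 * Example bare filter strings of the kind addr_filters__parse_bare_filter()
 * (declared below) is intended to handle, following the perf record address
 * filter syntax (illustrative; the paths and symbol names are hypothetical):
 *
 *	"filter main @ /bin/ls"		trace the function main in ls
 *	"start func_a @ /usr/bin/foo"	start tracing at func_a
 *	"stop 0xffffffff81000000"	stop tracing at a kernel address
 *
 * Each parsed term becomes one struct addr_filter linked on @head.
 */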

struct auxtrace_cache;

#ifdef HAVE_AUXTRACE_SUPPORT

/*
 * In snapshot mode the mmapped page is read-only which makes using
 * __sync_val_compare_and_swap() problematic. However, snapshot mode expects
 * the buffer is not updated while the snapshot is made (e.g. Intel PT disables
 * the event) so there is not a race anyway.
 */
static inline u64 auxtrace_mmap__read_snapshot_head(struct auxtrace_mmap *mm)
{
	struct perf_event_mmap_page *pc = mm->userpg;
	u64 head = READ_ONCE(pc->aux_head);

	/* Ensure all reads are done after we read the head */
	rmb();
	return head;
}

static inline u64 auxtrace_mmap__read_head(struct auxtrace_mmap *mm)
{
	struct perf_event_mmap_page *pc = mm->userpg;
#if BITS_PER_LONG == 64 || !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
	u64 head = READ_ONCE(pc->aux_head);
#else
	u64 head = __sync_val_compare_and_swap(&pc->aux_head, 0, 0);
#endif

	/* Ensure all reads are done after we read the head */
	rmb();
	return head;
}

static inline void auxtrace_mmap__write_tail(struct auxtrace_mmap *mm, u64 tail)
{
	struct perf_event_mmap_page *pc = mm->userpg;
#if BITS_PER_LONG != 64 && defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
	u64 old_tail;
#endif

	/* Ensure all reads are done before we write the tail out */
	mb();
#if BITS_PER_LONG == 64 || !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
	pc->aux_tail = tail;
#else
	do {
		old_tail = __sync_val_compare_and_swap(&pc->aux_tail, 0, 0);
	} while (!__sync_bool_compare_and_swap(&pc->aux_tail, old_tail, tail));
#endif
}
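
/*
 * Typical consume pattern (an illustrative sketch, not the actual
 * implementation): read the producer position, copy out the new data, then
 * advance the tail so the kernel can reuse the space.  auxtrace_mmap__read(),
 * declared below, performs this kind of transfer for real.
 *
 *	u64 head = auxtrace_mmap__read_head(mm);
 *	u64 old = mm->prev;
 *
 *	if (head != old) {
 *		// copy the data between 'old' and 'head' out of mm->base,
 *		// wrapping with mm->mask when the length is a power of two
 *		mm->prev = head;
 *		auxtrace_mmap__write_tail(mm, head);
 *	}
 */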

int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
			struct auxtrace_mmap_params *mp,
			void *userpg, int fd);
void auxtrace_mmap__munmap(struct auxtrace_mmap *mm);
void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
				off_t auxtrace_offset,
				unsigned int auxtrace_pages,
				bool auxtrace_overwrite);
void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
				   struct evlist *evlist, int idx,
				   bool per_cpu);

typedef int (*process_auxtrace_t)(struct perf_tool *tool,
				  struct mmap *map,
				  union perf_event *event, void *data1,
				  size_t len1, void *data2, size_t len2);

int auxtrace_mmap__read(struct mmap *map, struct auxtrace_record *itr,
			struct perf_tool *tool, process_auxtrace_t fn);

int auxtrace_mmap__read_snapshot(struct mmap *map,
				 struct auxtrace_record *itr,
				 struct perf_tool *tool, process_auxtrace_t fn,
				 size_t snapshot_size);

int auxtrace_queues__init(struct auxtrace_queues *queues);
int auxtrace_queues__add_event(struct auxtrace_queues *queues,
			       struct perf_session *session,
			       union perf_event *event, off_t data_offset,
			       struct auxtrace_buffer **buffer_ptr);
struct auxtrace_queue *
auxtrace_queues__sample_queue(struct auxtrace_queues *queues,
			      struct perf_sample *sample,
			      struct perf_session *session);
int auxtrace_queues__add_sample(struct auxtrace_queues *queues,
				struct perf_session *session,
				struct perf_sample *sample, u64 data_offset,
				u64 reference);
void auxtrace_queues__free(struct auxtrace_queues *queues);
int auxtrace_queues__process_index(struct auxtrace_queues *queues,
				   struct perf_session *session);
int auxtrace_queue_data(struct perf_session *session, bool samples,
			bool events);
struct auxtrace_buffer *auxtrace_buffer__next(struct auxtrace_queue *queue,
					      struct auxtrace_buffer *buffer);
void *auxtrace_buffer__get_data(struct auxtrace_buffer *buffer, int fd);
void auxtrace_buffer__put_data(struct auxtrace_buffer *buffer);
void auxtrace_buffer__drop_data(struct auxtrace_buffer *buffer);
void auxtrace_buffer__free(struct auxtrace_buffer *buffer);
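
/*
 * Illustrative sketch of walking a queue's buffers with the helpers above
 * ('queue' and 'fd' are hypothetical; fd would be the perf.data file
 * descriptor):
 *
 *	struct auxtrace_buffer *buffer = NULL;
 *
 *	while ((buffer = auxtrace_buffer__next(queue, buffer))) {
 *		void *data = auxtrace_buffer__get_data(buffer, fd);
 *
 *		if (!data)
 *			return -ENOMEM;
 *		// decode buffer->size bytes starting at 'data'
 *		auxtrace_buffer__put_data(buffer);
 *	}
 *
 * Passing NULL returns the first buffer in the queue; passing the previous
 * buffer returns the next one, or NULL at the end of the list.
 */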

int auxtrace_heap__add(struct auxtrace_heap *heap, unsigned int queue_nr,
		       u64 ordinal);
void auxtrace_heap__pop(struct auxtrace_heap *heap);
void auxtrace_heap__free(struct auxtrace_heap *heap);

struct auxtrace_cache_entry {
	struct hlist_node hash;
	u32 key;
};

struct auxtrace_cache *auxtrace_cache__new(unsigned int bits, size_t entry_size,
					   unsigned int limit_percent);
void auxtrace_cache__free(struct auxtrace_cache *auxtrace_cache);
void *auxtrace_cache__alloc_entry(struct auxtrace_cache *c);
void auxtrace_cache__free_entry(struct auxtrace_cache *c, void *entry);
int auxtrace_cache__add(struct auxtrace_cache *c, u32 key,
			struct auxtrace_cache_entry *entry);
void auxtrace_cache__remove(struct auxtrace_cache *c, u32 key);
void *auxtrace_cache__lookup(struct auxtrace_cache *c, u32 key);
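
/*
 * Illustrative sketch (struct my_entry and the argument values are
 * hypothetical): cached objects embed struct auxtrace_cache_entry as their
 * first member so they can be hashed by key:
 *
 *	struct my_entry {
 *		struct auxtrace_cache_entry entry;
 *		u64 info;
 *	};
 *
 *	struct auxtrace_cache *c = auxtrace_cache__new(10, sizeof(struct my_entry), 50);
 *	struct my_entry *e = auxtrace_cache__alloc_entry(c);
 *
 *	if (e) {
 *		e->info = some_value;
 *		auxtrace_cache__add(c, key, &e->entry);
 *	}
 *
 *	e = auxtrace_cache__lookup(c, key);
 */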

struct auxtrace_record *auxtrace_record__init(struct evlist *evlist,
					      int *err);

int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
				    struct record_opts *opts,
				    const char *str);
int auxtrace_parse_sample_options(struct auxtrace_record *itr,
				  struct evlist *evlist,
				  struct record_opts *opts, const char *str);
int auxtrace_record__options(struct auxtrace_record *itr,
			     struct evlist *evlist,
			     struct record_opts *opts);
size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr,
				       struct evlist *evlist);
int auxtrace_record__info_fill(struct auxtrace_record *itr,
			       struct perf_session *session,
			       struct perf_record_auxtrace_info *auxtrace_info,
			       size_t priv_size);
void auxtrace_record__free(struct auxtrace_record *itr);
int auxtrace_record__snapshot_start(struct auxtrace_record *itr);
int auxtrace_record__snapshot_finish(struct auxtrace_record *itr, bool on_exit);
int auxtrace_record__find_snapshot(struct auxtrace_record *itr, int idx,
				   struct auxtrace_mmap *mm,
				   unsigned char *data, u64 *head, u64 *old);
u64 auxtrace_record__reference(struct auxtrace_record *itr);
int auxtrace_record__read_finish(struct auxtrace_record *itr, int idx);

int auxtrace_index__auxtrace_event(struct list_head *head, union perf_event *event,
				   off_t file_offset);
int auxtrace_index__write(int fd, struct list_head *head);
int auxtrace_index__process(int fd, u64 size, struct perf_session *session,
			    bool needs_swap);
void auxtrace_index__free(struct list_head *head);

void auxtrace_synth_error(struct perf_record_auxtrace_error *auxtrace_error, int type,
			  int code, int cpu, pid_t pid, pid_t tid, u64 ip,
			  const char *msg, u64 timestamp);

int perf_event__process_auxtrace_info(struct perf_session *session,
				      union perf_event *event);
s64 perf_event__process_auxtrace(struct perf_session *session,
				 union perf_event *event);
int perf_event__process_auxtrace_error(struct perf_session *session,
				       union perf_event *event);
int itrace_parse_synth_opts(const struct option *opt, const char *str,
			    int unset);
void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts,
				    bool no_sample);

size_t perf_event__fprintf_auxtrace_error(union perf_event *event, FILE *fp);
void perf_session__auxtrace_error_inc(struct perf_session *session,
				      union perf_event *event);
void events_stats__auxtrace_error_warn(const struct events_stats *stats);

void addr_filters__init(struct addr_filters *filts);
void addr_filters__exit(struct addr_filters *filts);
int addr_filters__parse_bare_filter(struct addr_filters *filts,
				    const char *filter);
int auxtrace_parse_filters(struct evlist *evlist);

int auxtrace__process_event(struct perf_session *session, union perf_event *event,
			    struct perf_sample *sample, struct perf_tool *tool);
void auxtrace__dump_auxtrace_sample(struct perf_session *session,
				    struct perf_sample *sample);
int auxtrace__flush_events(struct perf_session *session, struct perf_tool *tool);
void auxtrace__free_events(struct perf_session *session);
void auxtrace__free(struct perf_session *session);
bool auxtrace__evsel_is_auxtrace(struct perf_session *session,
				 struct evsel *evsel);

#define ITRACE_HELP \
" i[period]: synthesize instructions events\n" \
" b: synthesize branches events (branch misses for Arm SPE)\n" \
" c: synthesize branches events (calls only)\n" \
" r: synthesize branches events (returns only)\n" \
" x: synthesize transactions events\n" \
" w: synthesize ptwrite events\n" \
" p: synthesize power events\n" \
" o: synthesize other events recorded due to the use\n" \
"    of aux-output (refer to perf record)\n" \
" e[flags]: synthesize error events\n" \
"    each flag must be preceded by + or -\n" \
"    error flags are: o (overflow)\n" \
"                     l (data lost)\n" \
" d[flags]: create a debug log\n" \
"    each flag must be preceded by + or -\n" \
"    log flags are: a (all perf events)\n" \
" f: synthesize first level cache events\n" \
" m: synthesize last level cache events\n" \
" t: synthesize TLB events\n" \
" a: synthesize remote access events\n" \
" g[len]: synthesize a call chain (use with i or x)\n" \
" G[len]: synthesize a call chain on existing event records\n" \
" l[len]: synthesize last branch entries (use with i or x)\n" \
" L[len]: synthesize last branch entries on existing event records\n" \
" sNUMBER: skip initial number of events\n" \
" q: quicker (less detailed) decoding\n" \
" PERIOD[ns|us|ms|i|t]: specify period to sample stream\n" \
" concatenate multiple options. Default is ibxwpe or cewp\n"
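
/*
 * For example (illustrative): "--itrace=i100us" synthesizes an instructions
 * sample every 100 microseconds, and "--itrace=be" synthesizes branch and
 * error events.  itrace_parse_synth_opts(), declared above, parses such
 * strings into struct itrace_synth_opts.
 */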
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) static inline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) void itrace_synth_opts__set_time_range(struct itrace_synth_opts *opts,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) struct perf_time_interval *ptime_range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) int range_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) opts->ptime_range = ptime_range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) opts->range_num = range_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) static inline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) void itrace_synth_opts__clear_time_range(struct itrace_synth_opts *opts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) opts->ptime_range = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) opts->range_num = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) #include "debug.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) static inline struct auxtrace_record *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) auxtrace_record__init(struct evlist *evlist __maybe_unused,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) int *err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) *err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) static inline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) void auxtrace_record__free(struct auxtrace_record *itr __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) static inline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) int auxtrace_record__options(struct auxtrace_record *itr __maybe_unused,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) struct evlist *evlist __maybe_unused,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) struct record_opts *opts __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) #define perf_event__process_auxtrace_info 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) #define perf_event__process_auxtrace 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) #define perf_event__process_auxtrace_error 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) static inline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) void perf_session__auxtrace_error_inc(struct perf_session *session
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) __maybe_unused,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) union perf_event *event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) static inline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) void events_stats__auxtrace_error_warn(const struct events_stats *stats
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) static inline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) int itrace_parse_synth_opts(const struct option *opt __maybe_unused,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) const char *str __maybe_unused,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) int unset __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) pr_err("AUX area tracing not supported\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717)
static inline
int auxtrace_parse_snapshot_options(struct auxtrace_record *itr __maybe_unused,
                                    struct record_opts *opts __maybe_unused,
                                    const char *str)
{
        if (!str)
                return 0;
        pr_err("AUX area tracing not supported\n");
        return -EINVAL;
}

static inline
int auxtrace_parse_sample_options(struct auxtrace_record *itr __maybe_unused,
                                  struct evlist *evlist __maybe_unused,
                                  struct record_opts *opts __maybe_unused,
                                  const char *str)
{
        if (!str)
                return 0;
        pr_err("AUX area tracing not supported\n");
        return -EINVAL;
}

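/*
 * Session-level hooks: processing, dumping, flushing and freeing AUX area
 * data all degrade to no-ops, so perf.data files are still handled; any
 * AUX area trace data they contain is simply not decoded.
 */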
static inline
int auxtrace__process_event(struct perf_session *session __maybe_unused,
                            union perf_event *event __maybe_unused,
                            struct perf_sample *sample __maybe_unused,
                            struct perf_tool *tool __maybe_unused)
{
        return 0;
}

static inline
void auxtrace__dump_auxtrace_sample(struct perf_session *session __maybe_unused,
                                    struct perf_sample *sample __maybe_unused)
{
}

static inline
int auxtrace__flush_events(struct perf_session *session __maybe_unused,
                           struct perf_tool *tool __maybe_unused)
{
        return 0;
}

static inline
void auxtrace__free_events(struct perf_session *session __maybe_unused)
{
}

static inline
void auxtrace_cache__free(struct auxtrace_cache *auxtrace_cache __maybe_unused)
{
}

static inline
void auxtrace__free(struct perf_session *session __maybe_unused)
{
}

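/*
 * An AUX area index can be neither written nor consumed in this
 * configuration, so both directions report -EINVAL; freeing an (empty)
 * index list remains a no-op.
 */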
static inline
int auxtrace_index__write(int fd __maybe_unused,
                          struct list_head *head __maybe_unused)
{
        return -EINVAL;
}

static inline
int auxtrace_index__process(int fd __maybe_unused,
                            u64 size __maybe_unused,
                            struct perf_session *session __maybe_unused,
                            bool needs_swap __maybe_unused)
{
        return -EINVAL;
}

static inline
void auxtrace_index__free(struct list_head *head __maybe_unused)
{
}

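/*
 * Without AUX support no evsel can be an AUX area event, and there are no
 * address filters to parse, so filter parsing trivially succeeds.
 */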
static inline
bool auxtrace__evsel_is_auxtrace(struct perf_session *session __maybe_unused,
                                 struct evsel *evsel __maybe_unused)
{
        return false;
}

static inline
int auxtrace_parse_filters(struct evlist *evlist __maybe_unused)
{
        return 0;
}

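/*
 * The AUX area mmap helpers below are real declarations, not stubs: the
 * basic ring-buffer plumbing is shared with the full-support build and is
 * harmless when no AUX area event was configured.
 *
 * Rough lifecycle sketch (illustrative only; the variable names are
 * placeholders, not part of this header):
 *
 *	struct auxtrace_mmap_params mp;
 *
 *	auxtrace_mmap_params__init(&mp, auxtrace_offset, auxtrace_pages,
 *				   overwrite);
 *	auxtrace_mmap_params__set_idx(&mp, evlist, idx, per_cpu);
 *	auxtrace_mmap__mmap(&mm, &mp, userpg, fd);
 *	...
 *	auxtrace_mmap__munmap(&mm);
 */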
int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
                        struct auxtrace_mmap_params *mp,
                        void *userpg, int fd);
void auxtrace_mmap__munmap(struct auxtrace_mmap *mm);
void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
                                off_t auxtrace_offset,
                                unsigned int auxtrace_pages,
                                bool auxtrace_overwrite);
void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
                                   struct evlist *evlist, int idx,
                                   bool per_cpu);

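/*
 * ITRACE_HELP is appended to the --itrace option description; with no AUX
 * area trace support there is nothing to document, so it is empty.
 *
 * Illustrative use in an option table (a sketch of the usual pattern, not
 * something defined in this header):
 *
 *	OPT_CALLBACK_OPTARG(0, "itrace", &itrace_synth_opts, NULL, "opts",
 *			    "Instruction Tracing options\n" ITRACE_HELP,
 *			    itrace_parse_synth_opts),
 */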
#define ITRACE_HELP ""

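/*
 * Time-range filtering only applies to samples synthesized from decoded
 * AUX data, so setting or clearing a time range is a no-op here.
 */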
static inline
void itrace_synth_opts__set_time_range(struct itrace_synth_opts *opts
                                       __maybe_unused,
                                       struct perf_time_interval *ptime_range
                                       __maybe_unused,
                                       int range_num __maybe_unused)
{
}

static inline
void itrace_synth_opts__clear_time_range(struct itrace_synth_opts *opts
                                         __maybe_unused)
{
}

#endif /* HAVE_AUXTRACE_SUPPORT */

#endif /* __PERF_AUXTRACE_H */