Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0+
//
// Linux performance counter support for ARC CPUs.
// This code is inspired by the perf support of various other architectures.
//
// Copyright (C) 2013-2018 Synopsys, Inc. (www.synopsys.com)

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <asm/arcregs.h>
#include <asm/stacktrace.h>

/* HW holds 8 symbols + one for null terminator */
#define ARCPMU_EVENT_NAME_LEN	9

enum arc_pmu_attr_groups {
	ARCPMU_ATTR_GR_EVENTS,
	ARCPMU_ATTR_GR_FORMATS,
	ARCPMU_NR_ATTR_GR
};

struct arc_pmu_raw_event_entry {
	char name[ARCPMU_EVENT_NAME_LEN];
};

struct arc_pmu {
	struct pmu	pmu;
	unsigned int	irq;
	int		n_counters;
	int		n_events;
	u64		max_period;
	int		ev_hw_idx[PERF_COUNT_ARC_HW_MAX];

	struct arc_pmu_raw_event_entry	*raw_entry;
	struct attribute		**attrs;
	struct perf_pmu_events_attr	*attr;
	const struct attribute_group	*attr_groups[ARCPMU_NR_ATTR_GR + 1];
};

struct arc_pmu_cpu {
	/*
	 * A 1 bit for an index indicates that the counter is being used for
	 * an event. A 0 means that the counter can be used.
	 */
	unsigned long	used_mask[BITS_TO_LONGS(ARC_PERF_MAX_COUNTERS)];

	/*
	 * The events that are active on the PMU for the given index.
	 */
	struct perf_event *act_counter[ARC_PERF_MAX_COUNTERS];
};

struct arc_callchain_trace {
	int depth;
	void *perf_stuff;
};

static int callchain_trace(unsigned int addr, void *data)
{
	struct arc_callchain_trace *ctrl = data;
	struct perf_callchain_entry_ctx *entry = ctrl->perf_stuff;

	perf_callchain_store(entry, addr);

	if (ctrl->depth++ < 3)
		return 0;

	return -1;
}
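
/*
 * With depth starting at 0, the "depth++ < 3" check above lets the
 * unwinder continue for the first three stored addresses and stops it
 * after the fourth, i.e. at most four kernel frames are recorded per
 * sample.
 */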

void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
			   struct pt_regs *regs)
{
	struct arc_callchain_trace ctrl = {
		.depth = 0,
		.perf_stuff = entry,
	};

	arc_unwind_core(NULL, regs, callchain_trace, &ctrl);
}

void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
			 struct pt_regs *regs)
{
	/*
	 * The user stack can't be unwound trivially with the kernel dwarf
	 * unwinder, so for now just record the user PC.
	 */
	perf_callchain_store(entry, instruction_pointer(regs));
}

static struct arc_pmu *arc_pmu;
static DEFINE_PER_CPU(struct arc_pmu_cpu, arc_pmu_cpu);

/* read counter #idx; note that counter# != event# on ARC! */
static u64 arc_pmu_read_counter(int idx)
{
	u32 tmp;
	u64 result;

	/*
	 * ARC supports making 'snapshots' of the counters, so we don't
	 * need to care about counters wrapping to 0 underneath our feet
	 */
	write_aux_reg(ARC_REG_PCT_INDEX, idx);
	tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
	write_aux_reg(ARC_REG_PCT_CONTROL, tmp | ARC_REG_PCT_CONTROL_SN);
	result = (u64) (read_aux_reg(ARC_REG_PCT_SNAPH)) << 32;
	result |= read_aux_reg(ARC_REG_PCT_SNAPL);

	return result;
}
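
/*
 * Setting the SN (snapshot) bit latches the selected counter into the
 * SNAPH/SNAPL register pair, so the two 32-bit halves read above form
 * one consistent 64-bit value even while the counter keeps running.
 */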

static void arc_perf_event_update(struct perf_event *event,
				  struct hw_perf_event *hwc, int idx)
{
	u64 prev_raw_count = local64_read(&hwc->prev_count);
	u64 new_raw_count = arc_pmu_read_counter(idx);
	s64 delta = new_raw_count - prev_raw_count;
	/*
	 * We aren't afraid of hwc->prev_count changing beneath our feet
	 * because there's no way for this function to be re-entered.
	 */
	local64_set(&hwc->prev_count, new_raw_count);
	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);
}

static void arc_pmu_read(struct perf_event *event)
{
	arc_perf_event_update(event, &event->hw, event->hw.idx);
}

static int arc_pmu_cache_event(u64 config)
{
	unsigned int cache_type, cache_op, cache_result;
	int ret;

	cache_type	= (config >>  0) & 0xff;
	cache_op	= (config >>  8) & 0xff;
	cache_result	= (config >> 16) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ret = arc_pmu_cache_map[cache_type][cache_op][cache_result];

	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	pr_debug("init cache event: type/op/result %d/%d/%d with h/w %d \'%s\'\n",
		 cache_type, cache_op, cache_result, ret,
		 arc_pmu_ev_hw_map[ret]);

	return ret;
}
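
/*
 * The config value decoded above uses the generic perf ABI encoding for
 * PERF_TYPE_HW_CACHE events: cache type in bits 0-7, op in bits 8-15,
 * result in bits 16-23. For example, an L1D read miss is encoded as
 *
 *   PERF_COUNT_HW_CACHE_L1D |
 *   (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *   (PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
 */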

/* initializes hw_perf_event structure if event is supported */
static int arc_pmu_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int ret;

	if (!is_sampling_event(event)) {
		hwc->sample_period = arc_pmu->max_period;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	hwc->config = 0;

	if (is_isa_arcv2()) {
		/* "exclude user" means "count only kernel" */
		if (event->attr.exclude_user)
			hwc->config |= ARC_REG_PCT_CONFIG_KERN;

		/* "exclude kernel" means "count only user" */
		if (event->attr.exclude_kernel)
			hwc->config |= ARC_REG_PCT_CONFIG_USER;
	}

	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		if (event->attr.config >= PERF_COUNT_HW_MAX)
			return -ENOENT;
		if (arc_pmu->ev_hw_idx[event->attr.config] < 0)
			return -ENOENT;
		hwc->config |= arc_pmu->ev_hw_idx[event->attr.config];
		pr_debug("init event %d with h/w %08x \'%s\'\n",
			 (int)event->attr.config, (int)hwc->config,
			 arc_pmu_ev_hw_map[event->attr.config]);
		return 0;

	case PERF_TYPE_HW_CACHE:
		ret = arc_pmu_cache_event(event->attr.config);
		if (ret < 0)
			return ret;
		hwc->config |= arc_pmu->ev_hw_idx[ret];
		pr_debug("init cache event with h/w %08x \'%s\'\n",
			 (int)hwc->config, arc_pmu_ev_hw_map[ret]);
		return 0;

	case PERF_TYPE_RAW:
		if (event->attr.config >= arc_pmu->n_events)
			return -ENOENT;

		hwc->config |= event->attr.config;
		pr_debug("init raw event with idx %lld \'%s\'\n",
			 event->attr.config,
			 arc_pmu->raw_entry[event->attr.config].name);

		return 0;

	default:
		return -ENOENT;
	}
}

/* starts all counters */
static void arc_pmu_enable(struct pmu *pmu)
{
	u32 tmp;

	tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
	write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x1);
}

/* stops all counters */
static void arc_pmu_disable(struct pmu *pmu)
{
	u32 tmp;

	tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
	write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x0);
}
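
/*
 * Only bit 0 (the global enable) changes in the two helpers above; the
 * upper 16 bits of PCT_CONTROL are deliberately preserved. The snapshot
 * (SN) command bit used in arc_pmu_read_counter() appears to live in
 * that upper half, which is presumably why it is kept intact.
 */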

static int arc_pmu_event_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int idx = hwc->idx;
	int overflow = 0;
	u64 value;

	if (unlikely(left <= -period)) {
		/* left underflowed by more than period. */
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	} else if (unlikely(left <= 0)) {
		/* left underflowed by less than period. */
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	}

	if (left > arc_pmu->max_period)
		left = arc_pmu->max_period;

	value = arc_pmu->max_period - left;
	local64_set(&hwc->prev_count, value);

	/* Select counter */
	write_aux_reg(ARC_REG_PCT_INDEX, idx);

	/* Write value */
	write_aux_reg(ARC_REG_PCT_COUNTL, lower_32_bits(value));
	write_aux_reg(ARC_REG_PCT_COUNTH, upper_32_bits(value));

	perf_event_update_userpage(event);

	return overflow;
}
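
/*
 * Worked example: with 48-bit counters, max_period is 2^47 - 1. If
 * period_left is 1000, the counter is preloaded with max_period - 1000,
 * so after 1000 more events it reaches the interrupt threshold that
 * arc_pmu_add() arms at max_period for sampling events.
 */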

/*
 * Assigns hardware counter to hardware condition.
 * Note that there is no separate start/stop mechanism;
 * stopping is achieved by assigning the 'never' condition
 */
static void arc_pmu_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (WARN_ON_ONCE(idx == -1))
		return;

	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;

	arc_pmu_event_set_period(event);

	/* Enable interrupt for this counter */
	if (is_sampling_event(event))
		write_aux_reg(ARC_REG_PCT_INT_CTRL,
			      read_aux_reg(ARC_REG_PCT_INT_CTRL) | BIT(idx));

	/* enable ARC pmu here */
	write_aux_reg(ARC_REG_PCT_INDEX, idx);		/* counter # */
	write_aux_reg(ARC_REG_PCT_CONFIG, hwc->config);	/* condition */
}

static void arc_pmu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	/* Disable interrupt for this counter */
	if (is_sampling_event(event)) {
		/*
		 * Reset the interrupt flag by writing 1 to it. This is
		 * required to make sure no pending interrupt is left
		 * behind.
		 */
		write_aux_reg(ARC_REG_PCT_INT_ACT, BIT(idx));
		write_aux_reg(ARC_REG_PCT_INT_CTRL,
			      read_aux_reg(ARC_REG_PCT_INT_CTRL) & ~BIT(idx));
	}

	if (!(event->hw.state & PERF_HES_STOPPED)) {
		/* stop ARC pmu here */
		write_aux_reg(ARC_REG_PCT_INDEX, idx);

		/* condition code #0 is always "never" */
		write_aux_reg(ARC_REG_PCT_CONFIG, 0);

		event->hw.state |= PERF_HES_STOPPED;
	}

	if ((flags & PERF_EF_UPDATE) &&
	    !(event->hw.state & PERF_HES_UPTODATE)) {
		arc_perf_event_update(event, &event->hw, idx);
		event->hw.state |= PERF_HES_UPTODATE;
	}
}

static void arc_pmu_del(struct perf_event *event, int flags)
{
	struct arc_pmu_cpu *pmu_cpu = this_cpu_ptr(&arc_pmu_cpu);

	arc_pmu_stop(event, PERF_EF_UPDATE);
	__clear_bit(event->hw.idx, pmu_cpu->used_mask);

	pmu_cpu->act_counter[event->hw.idx] = NULL;

	perf_event_update_userpage(event);
}

/* allocate hardware counter and optionally start counting */
static int arc_pmu_add(struct perf_event *event, int flags)
{
	struct arc_pmu_cpu *pmu_cpu = this_cpu_ptr(&arc_pmu_cpu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	idx = ffz(pmu_cpu->used_mask[0]);
	if (idx == arc_pmu->n_counters)
		return -EAGAIN;

	__set_bit(idx, pmu_cpu->used_mask);
	hwc->idx = idx;

	write_aux_reg(ARC_REG_PCT_INDEX, idx);

	pmu_cpu->act_counter[idx] = event;

	if (is_sampling_event(event)) {
		/* Mimic full counter overflow as other arches do */
		write_aux_reg(ARC_REG_PCT_INT_CNTL,
			      lower_32_bits(arc_pmu->max_period));
		write_aux_reg(ARC_REG_PCT_INT_CNTH,
			      upper_32_bits(arc_pmu->max_period));
	}

	write_aux_reg(ARC_REG_PCT_CONFIG, 0);
	write_aux_reg(ARC_REG_PCT_COUNTL, 0);
	write_aux_reg(ARC_REG_PCT_COUNTH, 0);
	local64_set(&hwc->prev_count, 0);

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (flags & PERF_EF_START)
		arc_pmu_start(event, PERF_EF_RELOAD);

	perf_event_update_userpage(event);

	return 0;
}
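
/*
 * Counter allocation note: ffz() on used_mask[0] alone is safe because
 * the probe routine's BUILD_BUG_ON guarantees at most 32 counters, so
 * the whole mask fits in one long. The freshly claimed counter is
 * parked at zero on the "never" condition (config 0) until
 * arc_pmu_start() programs the real condition.
 */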

#ifdef CONFIG_ISA_ARCV2
static irqreturn_t arc_pmu_intr(int irq, void *dev)
{
	struct perf_sample_data data;
	struct arc_pmu_cpu *pmu_cpu = this_cpu_ptr(&arc_pmu_cpu);
	struct pt_regs *regs;
	unsigned int active_ints;
	int idx;

	arc_pmu_disable(&arc_pmu->pmu);

	active_ints = read_aux_reg(ARC_REG_PCT_INT_ACT);
	if (!active_ints)
		goto done;

	regs = get_irq_regs();

	do {
		struct perf_event *event;
		struct hw_perf_event *hwc;

		idx = __ffs(active_ints);

		/* Reset the interrupt flag by writing 1 to it */
		write_aux_reg(ARC_REG_PCT_INT_ACT, BIT(idx));

		/*
		 * Resetting the "interrupt active" bit automatically resets
		 * the corresponding "interrupt enable" bit as well, so the
		 * interrupt must be re-enabled for this counter here.
		 */
		write_aux_reg(ARC_REG_PCT_INT_CTRL,
			read_aux_reg(ARC_REG_PCT_INT_CTRL) | BIT(idx));

		event = pmu_cpu->act_counter[idx];
		hwc = &event->hw;

		WARN_ON_ONCE(hwc->idx != idx);

		arc_perf_event_update(event, &event->hw, event->hw.idx);
		perf_sample_data_init(&data, 0, hwc->last_period);
		if (arc_pmu_event_set_period(event)) {
			if (perf_event_overflow(event, &data, regs))
				arc_pmu_stop(event, 0);
		}

		active_ints &= ~BIT(idx);
	} while (active_ints);

done:
	arc_pmu_enable(&arc_pmu->pmu);

	return IRQ_HANDLED;
}
#else

static irqreturn_t arc_pmu_intr(int irq, void *dev)
{
	return IRQ_NONE;
}

#endif /* CONFIG_ISA_ARCV2 */

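/*
 * Runs on each CPU (via on_each_cpu() from the probe routine) to enable
 * the per-cpu counter overflow IRQ and clear any stale interrupt flags.
 */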
static void arc_cpu_pmu_irq_init(void *data)
{
	int irq = *(int *)data;

	enable_percpu_irq(irq, IRQ_TYPE_NONE);

	/* Clear all pending interrupt flags */
	write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff);
}

/* Event field occupies the bottom 15 bits of our config field */
PMU_FORMAT_ATTR(event, "config:0-14");
static struct attribute *arc_pmu_format_attrs[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group arc_pmu_format_attr_gr = {
	.name = "format",
	.attrs = arc_pmu_format_attrs,
};
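
/*
 * The format group above surfaces as
 * /sys/bus/event_source/devices/arc_pct/format/event, containing
 * "config:0-14"; it tells the perf tool where to place an event index
 * in perf_event_attr.config.
 */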

static ssize_t arc_pmu_events_sysfs_show(struct device *dev,
					 struct device_attribute *attr,
					 char *page)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
	return sprintf(page, "event=0x%04llx\n", pmu_attr->id);
}
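
/*
 * Backs every generated events/<name> sysfs file; the "event=0x%04llx"
 * string it prints is what the perf tool parses against the format
 * spec above.
 */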

/*
 * We don't add attrs here as we don't have a pre-defined list of perf
 * events. We will generate and add attrs dynamically in probe() after
 * we read the HW configuration.
 */
static struct attribute_group arc_pmu_events_attr_gr = {
	.name = "events",
};

static void arc_pmu_add_raw_event_attr(int j, char *str)
{
	memmove(arc_pmu->raw_entry[j].name, str, ARCPMU_EVENT_NAME_LEN - 1);
	arc_pmu->attr[j].attr.attr.name = arc_pmu->raw_entry[j].name;
	arc_pmu->attr[j].attr.attr.mode = VERIFY_OCTAL_PERMISSIONS(0444);
	arc_pmu->attr[j].attr.show = arc_pmu_events_sysfs_show;
	arc_pmu->attr[j].id = j;
	arc_pmu->attrs[j] = &(arc_pmu->attr[j].attr.attr);
}
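
/*
 * With these attributes in place, conditions can be requested by name
 * through the usual perf syntax, e.g. (assuming the hardware reports a
 * condition named "crun", the always-running cycle count):
 *
 *   perf stat -e arc_pct/crun/ -- <cmd>
 *   perf stat -e arc_pct/event=0x12/ -- <cmd>   (raw index form)
 */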

static int arc_pmu_raw_alloc(struct device *dev)
{
	arc_pmu->attr = devm_kmalloc_array(dev, arc_pmu->n_events + 1,
		sizeof(*arc_pmu->attr), GFP_KERNEL | __GFP_ZERO);
	if (!arc_pmu->attr)
		return -ENOMEM;

	arc_pmu->attrs = devm_kmalloc_array(dev, arc_pmu->n_events + 1,
		sizeof(*arc_pmu->attrs), GFP_KERNEL | __GFP_ZERO);
	if (!arc_pmu->attrs)
		return -ENOMEM;

	arc_pmu->raw_entry = devm_kmalloc_array(dev, arc_pmu->n_events,
		sizeof(*arc_pmu->raw_entry), GFP_KERNEL | __GFP_ZERO);
	if (!arc_pmu->raw_entry)
		return -ENOMEM;

	return 0;
}
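
/*
 * attr and attrs are sized n_events + 1 and zero-initialized, so the
 * trailing slot of attrs stays NULL, which is the terminator that the
 * sysfs attribute-group code expects.
 */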

static inline bool event_in_hw_event_map(int i, char *name)
{
	if (!arc_pmu_ev_hw_map[i])
		return false;

	if (!strlen(arc_pmu_ev_hw_map[i]))
		return false;

	if (strcmp(arc_pmu_ev_hw_map[i], name))
		return false;

	return true;
}

static void arc_pmu_map_hw_event(int j, char *str)
{
	int i;

	/* See if HW condition has been mapped to a perf event_id */
	for (i = 0; i < ARRAY_SIZE(arc_pmu_ev_hw_map); i++) {
		if (event_in_hw_event_map(i, str)) {
			pr_debug("mapping perf event %2d to h/w event \'%8s\' (idx %d)\n",
				 i, str, j);
			arc_pmu->ev_hw_idx[i] = j;
		}
	}
}

static int arc_pmu_device_probe(struct platform_device *pdev)
{
	struct arc_reg_pct_build pct_bcr;
	struct arc_reg_cc_build cc_bcr;
	int i, has_interrupts, irq = -1;
	int counter_size;	/* in bits */

	union cc_name {
		struct {
			u32 word0, word1;
			char sentinel;
		} indiv;
		char str[ARCPMU_EVENT_NAME_LEN];
	} cc_name;
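
	/*
	 * The cc_name union mirrors how the hardware reports condition
	 * names: ARC_REG_CC_NAME0/NAME1 each return four ASCII characters,
	 * and the sentinel byte overlays str[8], giving the string a
	 * permanent NUL terminator (written once before the loop below).
	 */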

	READ_BCR(ARC_REG_PCT_BUILD, pct_bcr);
	if (!pct_bcr.v) {
		pr_err("This core does not have performance counters!\n");
		return -ENODEV;
	}
	BUILD_BUG_ON(ARC_PERF_MAX_COUNTERS > 32);
	if (WARN_ON(pct_bcr.c > ARC_PERF_MAX_COUNTERS))
		return -EINVAL;

	READ_BCR(ARC_REG_CC_BUILD, cc_bcr);
	if (WARN(!cc_bcr.v, "Counters exist but no countable conditions?"))
		return -EINVAL;

	arc_pmu = devm_kzalloc(&pdev->dev, sizeof(struct arc_pmu), GFP_KERNEL);
	if (!arc_pmu)
		return -ENOMEM;

	arc_pmu->n_events = cc_bcr.c;

	if (arc_pmu_raw_alloc(&pdev->dev))
		return -ENOMEM;

	has_interrupts = is_isa_arcv2() ? pct_bcr.i : 0;

	arc_pmu->n_counters = pct_bcr.c;
	counter_size = 32 + (pct_bcr.s << 4);

	arc_pmu->max_period = (1ULL << counter_size) / 2 - 1ULL;
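	/*
	 * counter_size is 32 + 16 * pct_bcr.s bits: s == 0 means 32-bit and
	 * s == 1 means 48-bit counters. max_period is half the counter
	 * range (e.g. 2^47 - 1 for 48-bit counters), leaving headroom so a
	 * freshly programmed period cannot overflow immediately.
	 */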

	pr_info("ARC perf\t: %d counters (%d bits), %d conditions%s\n",
		arc_pmu->n_counters, counter_size, cc_bcr.c,
		has_interrupts ? ", [overflow IRQ support]" : "");

	cc_name.str[ARCPMU_EVENT_NAME_LEN - 1] = 0;
	for (i = 0; i < PERF_COUNT_ARC_HW_MAX; i++)
		arc_pmu->ev_hw_idx[i] = -1;
	/* loop through all available h/w condition indexes */
	for (i = 0; i < cc_bcr.c; i++) {
		write_aux_reg(ARC_REG_CC_INDEX, i);
		cc_name.indiv.word0 = le32_to_cpu(read_aux_reg(ARC_REG_CC_NAME0));
		cc_name.indiv.word1 = le32_to_cpu(read_aux_reg(ARC_REG_CC_NAME1));

		arc_pmu_map_hw_event(i, cc_name.str);
		arc_pmu_add_raw_event_attr(i, cc_name.str);
	}

	arc_pmu_events_attr_gr.attrs = arc_pmu->attrs;
	arc_pmu->attr_groups[ARCPMU_ATTR_GR_EVENTS] = &arc_pmu_events_attr_gr;
	arc_pmu->attr_groups[ARCPMU_ATTR_GR_FORMATS] = &arc_pmu_format_attr_gr;

	arc_pmu->pmu = (struct pmu) {
		.pmu_enable	= arc_pmu_enable,
		.pmu_disable	= arc_pmu_disable,
		.event_init	= arc_pmu_event_init,
		.add		= arc_pmu_add,
		.del		= arc_pmu_del,
		.start		= arc_pmu_start,
		.stop		= arc_pmu_stop,
		.read		= arc_pmu_read,
		.attr_groups	= arc_pmu->attr_groups,
	};

	if (has_interrupts) {
		irq = platform_get_irq(pdev, 0);
		if (irq >= 0) {
			int ret;

			arc_pmu->irq = irq;

			/* intc map function ensures irq_set_percpu_devid() is called */
			ret = request_percpu_irq(irq, arc_pmu_intr, "ARC perf counters",
						 this_cpu_ptr(&arc_pmu_cpu));

			if (!ret)
				on_each_cpu(arc_cpu_pmu_irq_init, &irq, 1);
			else
				irq = -1;
		}
	}

	if (irq == -1)
		arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;

	/*
	 * The perf parser doesn't really like the '-' symbol in event names,
	 * so use '_' in the ARC PCT name, as it becomes the kernel PMU event
	 * prefix.
	 */
	return perf_pmu_register(&arc_pmu->pmu, "arc_pct", PERF_TYPE_RAW);
}

static const struct of_device_id arc_pmu_match[] = {
	{ .compatible = "snps,arc700-pct" },
	{ .compatible = "snps,archs-pct" },
	{},
};
MODULE_DEVICE_TABLE(of, arc_pmu_match);

static struct platform_driver arc_pmu_driver = {
	.driver	= {
		.name		= "arc-pct",
		.of_match_table = of_match_ptr(arc_pmu_match),
	},
	.probe		= arc_pmu_device_probe,
};

module_platform_driver(arc_pmu_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mischa Jonker <mjonker@synopsys.com>");
MODULE_DESCRIPTION("ARC PMU driver");