Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2009 Jaswinder Singh Rajput
 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 * Copyright (C) 2009 Google, Inc., Stephane Eranian
 * Copyright 2014 Tilera Corporation. All Rights Reserved.
 * Copyright (C) 2018 Andes Technology Corporation
 *
 * Perf_events support for RISC-V platforms.
 *
 * Since the spec (as of now, Priv-Spec 1.10) does not provide enough
 * functionality for perf events to work fully, this file provides
 * only a very basic framework.
 *
 * For platform ports, please check Documentation/riscv/pmu.txt.
 *
 * The copyright lines include the x86 and tile ones.
 */

#include <linux/kprobes.h>
#include <linux/kernel.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>
#include <linux/bitmap.h>
#include <linux/irq.h>
#include <linux/perf_event.h>
#include <linux/atomic.h>
#include <linux/of.h>
#include <asm/perf_event.h>

static const struct riscv_pmu *riscv_pmu __read_mostly;
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

/*
 * Hardware & cache maps and their methods
 */

static const int riscv_hw_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES]		= RISCV_PMU_CYCLE,
	[PERF_COUNT_HW_INSTRUCTIONS]		= RISCV_PMU_INSTRET,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= RISCV_OP_UNSUPP,
	[PERF_COUNT_HW_CACHE_MISSES]		= RISCV_OP_UNSUPP,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= RISCV_OP_UNSUPP,
	[PERF_COUNT_HW_BRANCH_MISSES]		= RISCV_OP_UNSUPP,
	[PERF_COUNT_HW_BUS_CYCLES]		= RISCV_OP_UNSUPP,
};
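
/*
 * Example: a PERF_TYPE_HARDWARE event with attr.config ==
 * PERF_COUNT_HW_CPU_CYCLES resolves to the fixed cycle counter
 * (RISCV_PMU_CYCLE) via riscv_map_hw_event() below, while every
 * entry marked RISCV_OP_UNSUPP is rejected when the event is
 * initialized.
 */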

#define C(x) PERF_COUNT_HW_CACHE_##x
static const int riscv_cache_event_map[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
		},
	},
};

static int riscv_map_hw_event(u64 config)
{
	if (config >= riscv_pmu->max_events)
		return -EINVAL;

	return riscv_pmu->hw_events[config];
}

static int riscv_map_cache_decode(u64 config, unsigned int *type,
			   unsigned int *op, unsigned int *result)
{
	return -ENOENT;
}
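
/*
 * A minimal sketch (not wired up) of what a non-stub decode could look
 * like, assuming the generic perf ABI packing of attr.config for cache
 * events: config = id | (op << 8) | (result << 16). The bounds of all
 * three fields are checked by riscv_map_cache_event() below.
 */
static int __maybe_unused riscv_map_cache_decode_sketch(u64 config,
			   unsigned int *type, unsigned int *op,
			   unsigned int *result)
{
	*type = config & 0xff;			/* perf_hw_cache_id */
	*op = (config >> 8) & 0xff;		/* perf_hw_cache_op_id */
	*result = (config >> 16) & 0xff;	/* perf_hw_cache_op_result_id */

	return 0;
}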

static int riscv_map_cache_event(u64 config)
{
	unsigned int type, op, result;
	int err = -ENOENT;
	int code;

	err = riscv_map_cache_decode(config, &type, &op, &result);
	if (!riscv_pmu->cache_events || err)
		return err;

	if (type >= PERF_COUNT_HW_CACHE_MAX ||
	    op >= PERF_COUNT_HW_CACHE_OP_MAX ||
	    result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	code = (*riscv_pmu->cache_events)[type][op][result];
	if (code == RISCV_OP_UNSUPP)
		return -EINVAL;

	return code;
}

/*
 * Low-level functions: reading/writing counters
 */

static inline u64 read_counter(int idx)
{
	u64 val = 0;

	switch (idx) {
	case RISCV_PMU_CYCLE:
		val = csr_read(CSR_CYCLE);
		break;
	case RISCV_PMU_INSTRET:
		val = csr_read(CSR_INSTRET);
		break;
	default:
		WARN_ON_ONCE(idx < 0 || idx > RISCV_MAX_COUNTERS);
		return -EINVAL;
	}

	return val;
}
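
/*
 * Illustrative sketch only: on RV32, the 64-bit counters are exposed as
 * two 32-bit CSR halves (CSR_CYCLE/CSR_CYCLEH), so a carry-safe read
 * must re-check the high half; read_counter() above reads a single
 * XLEN-wide CSR and would truncate to 32 bits on RV32.
 */
#ifdef CONFIG_32BIT
static inline u64 __maybe_unused read_cycle_counter_rv32(void)
{
	u32 hi, lo, tmp;

	do {
		hi = csr_read(CSR_CYCLEH);
		lo = csr_read(CSR_CYCLE);
		tmp = csr_read(CSR_CYCLEH);
	} while (hi != tmp);	/* retry if the low half wrapped */

	return ((u64)hi << 32) | lo;
}
#endif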

static inline void write_counter(int idx, u64 value)
{
	/* currently not supported */
	WARN_ON_ONCE(1);
}

/*
 * pmu->read: read and update the counter
 *
 * Other architectures' implementations often have a xxx_perf_event_update
 * routine, which returns the counter value when called from the IRQ
 * handler, but returns void when called via the pmu->read method.
 */
static void riscv_pmu_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev_raw_count, new_raw_count;
	u64 oldval;
	int idx = hwc->idx;
	u64 delta;

	do {
		prev_raw_count = local64_read(&hwc->prev_count);
		new_raw_count = read_counter(idx);

		oldval = local64_cmpxchg(&hwc->prev_count, prev_raw_count,
					 new_raw_count);
	} while (oldval != prev_raw_count);

	/*
	 * delta is the value to update the counter we maintain in the kernel.
	 */
	delta = (new_raw_count - prev_raw_count) &
		((1ULL << riscv_pmu->counter_width) - 1);
	local64_add(delta, &event->count);
	/*
	 * Something like local64_sub(delta, &hwc->period_left) here is
	 * needed if there is an interrupt for perf.
	 */
}
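
/*
 * Worked example of the masking above, with counter_width = 63:
 * if prev_raw_count == (1ULL << 63) - 1 and the counter wraps so that
 * new_raw_count == 1, the unmasked 64-bit difference is 2 - (1ULL << 63),
 * and masking with ((1ULL << 63) - 1) yields delta == 2, the true number
 * of events elapsed across the wrap.
 */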

/*
 * State transition functions:
 *
 * stop()/start() & add()/del()
 */

/*
 * pmu->stop: stop the counter
 */
static void riscv_pmu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
	hwc->state |= PERF_HES_STOPPED;

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		riscv_pmu->pmu->read(event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

/*
 * pmu->start: start the event.
 */
static void riscv_pmu_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	if (flags & PERF_EF_RELOAD) {
		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));

		/*
		 * Set the counter to the period of the next interrupt here,
		 * if the platform has one.
		 */
	}

	hwc->state = 0;
	perf_event_update_userpage(event);

	/*
	 * Since we cannot write to counters, this serves as an initialization
	 * of the delta mechanism in pmu->read(); otherwise, the delta would be
	 * wrong when pmu->read() is called for the first time.
	 */
	local64_set(&hwc->prev_count, read_counter(hwc->idx));
}

/*
 * pmu->add: add the event to the PMU.
 */
static int riscv_pmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	if (cpuc->n_events == riscv_pmu->num_counters)
		return -ENOSPC;

	/*
	 * We don't have general counters, so no binding-event-to-counter
	 * process here.
	 *
	 * Indexing by hwc->config generally does not work, since config may
	 * contain extra information, but here the only info we have in
	 * hwc->config is the event index.
	 */
	hwc->idx = hwc->config;
	cpuc->events[hwc->idx] = event;
	cpuc->n_events++;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	if (flags & PERF_EF_START)
		riscv_pmu->pmu->start(event, PERF_EF_RELOAD);

	return 0;
}

/*
 * pmu->del: delete the event from PMU.
 */
static void riscv_pmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	cpuc->events[hwc->idx] = NULL;
	cpuc->n_events--;
	riscv_pmu->pmu->stop(event, PERF_EF_UPDATE);
	perf_event_update_userpage(event);
}

/*
 * Interrupt: a skeleton for reference.
 */

static DEFINE_MUTEX(pmc_reserve_mutex);

static irqreturn_t riscv_base_pmu_handle_irq(int irq_num, void *dev)
{
	return IRQ_NONE;
}
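
/*
 * Note (sketch of intent only): a real handler would stop the overflowed
 * counter, fold its value into event->count via the pmu->read path, rearm
 * the sampling period, and return IRQ_HANDLED. Returning IRQ_NONE means
 * this skeleton never claims the interrupt; the base PMU registers no IRQ
 * at all (see .irq = -1 below).
 */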

static int reserve_pmc_hardware(void)
{
	int err = 0;

	mutex_lock(&pmc_reserve_mutex);
	if (riscv_pmu->irq >= 0 && riscv_pmu->handle_irq) {
		err = request_irq(riscv_pmu->irq, riscv_pmu->handle_irq,
				  IRQF_PERCPU, "riscv-base-perf", NULL);
	}
	mutex_unlock(&pmc_reserve_mutex);

	return err;
}

static void release_pmc_hardware(void)
{
	mutex_lock(&pmc_reserve_mutex);
	if (riscv_pmu->irq >= 0)
		free_irq(riscv_pmu->irq, NULL);
	mutex_unlock(&pmc_reserve_mutex);
}

/*
 * Event Initialization/Finalization
 */

static atomic_t riscv_active_events = ATOMIC_INIT(0);

static void riscv_event_destroy(struct perf_event *event)
{
	if (atomic_dec_return(&riscv_active_events) == 0)
		release_pmc_hardware();
}

static int riscv_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	int err;
	int code;

	if (atomic_inc_return(&riscv_active_events) == 1) {
		err = reserve_pmc_hardware();

		if (err) {
			pr_warn("PMC hardware not available\n");
			atomic_dec(&riscv_active_events);
			return -EBUSY;
		}
	}

	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		code = riscv_pmu->map_hw_event(attr->config);
		break;
	case PERF_TYPE_HW_CACHE:
		code = riscv_pmu->map_cache_event(attr->config);
		break;
	case PERF_TYPE_RAW:
		return -EOPNOTSUPP;
	default:
		return -ENOENT;
	}

	event->destroy = riscv_event_destroy;
	if (code < 0) {
		event->destroy(event);
		return code;
	}

	/*
	 * idx is set to -1 because the index of a general event should not be
	 * decided until binding to some counter in pmu->add().
	 *
	 * But since we don't have such support, later in pmu->add(), we just
	 * use hwc->config as the index instead.
	 */
	hwc->config = code;
	hwc->idx = -1;

	return 0;
}
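
/*
 * Usage note: with this event_init(), "perf stat -e cycles,instructions"
 * (PERF_TYPE_HARDWARE) is serviced by the two fixed counters, cache
 * events fail with -ENOENT from the stub decode, and raw events
 * ("-e rNNN") are explicitly refused with -EOPNOTSUPP.
 */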

/*
 * Initialization
 */

static struct pmu min_pmu = {
	.name		= "riscv-base",
	.event_init	= riscv_event_init,
	.add		= riscv_pmu_add,
	.del		= riscv_pmu_del,
	.start		= riscv_pmu_start,
	.stop		= riscv_pmu_stop,
	.read		= riscv_pmu_read,
};

static const struct riscv_pmu riscv_base_pmu = {
	.pmu = &min_pmu,
	.max_events = ARRAY_SIZE(riscv_hw_event_map),
	.map_hw_event = riscv_map_hw_event,
	.hw_events = riscv_hw_event_map,
	.map_cache_event = riscv_map_cache_event,
	.cache_events = &riscv_cache_event_map,
	.counter_width = 63,
	.num_counters = RISCV_BASE_COUNTERS + 0,
	.handle_irq = &riscv_base_pmu_handle_irq,

	/* This means this PMU has no IRQ. */
	.irq = -1,
};

static const struct of_device_id riscv_pmu_of_ids[] = {
	{.compatible = "riscv,base-pmu",	.data = &riscv_base_pmu},
	{ /* sentinel value */ }
};
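
/*
 * For reference, a device tree node that this table would match might
 * look as follows (sketch; of_find_node_by_type() in init_hw_perf_events()
 * matches on the device_type property, of_match_node() on compatible):
 *
 *	pmu {
 *		device_type = "pmu";
 *		compatible = "riscv,base-pmu";
 *	};
 */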

static int __init init_hw_perf_events(void)
{
	struct device_node *node = of_find_node_by_type(NULL, "pmu");
	const struct of_device_id *of_id;

	riscv_pmu = &riscv_base_pmu;

	if (node) {
		of_id = of_match_node(riscv_pmu_of_ids, node);

		if (of_id)
			riscv_pmu = of_id->data;
		of_node_put(node);
	}

	perf_pmu_register(riscv_pmu->pmu, "cpu", PERF_TYPE_RAW);
	return 0;
}
arch_initcall(init_hw_perf_events);