// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for FPGA Management Engine (FME) Global Performance Reporting
 *
 * Copyright 2019 Intel Corporation, Inc.
 *
 * Authors:
 *   Kang Luwei <luwei.kang@intel.com>
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 *   Wu Hao <hao.wu@intel.com>
 *   Xu Yilun <yilun.xu@intel.com>
 *   Joseph Grecco <joe.grecco@intel.com>
 *   Enno Luebbers <enno.luebbers@intel.com>
 *   Tim Whisonant <tim.whisonant@intel.com>
 *   Ananda Ravuri <ananda.ravuri@intel.com>
 *   Mitchel, Henry <henry.mitchel@intel.com>
 */

#include <linux/perf_event.h>
#include "dfl.h"
#include "dfl-fme.h"

/*
 * Performance Counter Registers for Cache.
 *
 * Cache Events are listed below as CACHE_EVNT_*.
 */
#define CACHE_CTRL			0x8
#define CACHE_RESET_CNTR		BIT_ULL(0)
#define CACHE_FREEZE_CNTR		BIT_ULL(8)
#define CACHE_CTRL_EVNT			GENMASK_ULL(19, 16)
#define CACHE_EVNT_RD_HIT		0x0
#define CACHE_EVNT_WR_HIT		0x1
#define CACHE_EVNT_RD_MISS		0x2
#define CACHE_EVNT_WR_MISS		0x3
#define CACHE_EVNT_RSVD			0x4
#define CACHE_EVNT_HOLD_REQ		0x5
#define CACHE_EVNT_DATA_WR_PORT_CONTEN	0x6
#define CACHE_EVNT_TAG_WR_PORT_CONTEN	0x7
#define CACHE_EVNT_TX_REQ_STALL		0x8
#define CACHE_EVNT_RX_REQ_STALL		0x9
#define CACHE_EVNT_EVICTIONS		0xa
#define CACHE_EVNT_MAX			CACHE_EVNT_EVICTIONS
#define CACHE_CHANNEL_SEL		BIT_ULL(20)
#define CACHE_CHANNEL_RD		0
#define CACHE_CHANNEL_WR		1
#define CACHE_CNTR0			0x10
#define CACHE_CNTR1			0x18
#define CACHE_CNTR_EVNT_CNTR		GENMASK_ULL(47, 0)
#define CACHE_CNTR_EVNT			GENMASK_ULL(63, 60)

/*
 * Performance Counter Registers for Fabric.
 *
 * Fabric Events are listed below as FAB_EVNT_*
 */
#define FAB_CTRL			0x20
#define FAB_RESET_CNTR			BIT_ULL(0)
#define FAB_FREEZE_CNTR			BIT_ULL(8)
#define FAB_CTRL_EVNT			GENMASK_ULL(19, 16)
#define FAB_EVNT_PCIE0_RD		0x0
#define FAB_EVNT_PCIE0_WR		0x1
#define FAB_EVNT_PCIE1_RD		0x2
#define FAB_EVNT_PCIE1_WR		0x3
#define FAB_EVNT_UPI_RD			0x4
#define FAB_EVNT_UPI_WR			0x5
#define FAB_EVNT_MMIO_RD		0x6
#define FAB_EVNT_MMIO_WR		0x7
#define FAB_EVNT_MAX			FAB_EVNT_MMIO_WR
#define FAB_PORT_ID			GENMASK_ULL(21, 20)
#define FAB_PORT_FILTER			BIT_ULL(23)
#define FAB_PORT_FILTER_DISABLE		0
#define FAB_PORT_FILTER_ENABLE		1
#define FAB_CNTR			0x28
#define FAB_CNTR_EVNT_CNTR		GENMASK_ULL(59, 0)
#define FAB_CNTR_EVNT			GENMASK_ULL(63, 60)

/*
 * Performance Counter Registers for Clock.
 *
 * Clock Counter can't be reset or frozen by SW.
 */
#define CLK_CNTR			0x30
#define BASIC_EVNT_CLK			0x0
#define BASIC_EVNT_MAX			BASIC_EVNT_CLK

/*
 * Performance Counter Registers for IOMMU / VT-D.
 *
 * VT-D Events are listed below as VTD_EVNT_* and VTD_SIP_EVNT_*
 */
#define VTD_CTRL			0x38
#define VTD_RESET_CNTR			BIT_ULL(0)
#define VTD_FREEZE_CNTR			BIT_ULL(8)
#define VTD_CTRL_EVNT			GENMASK_ULL(19, 16)
#define VTD_EVNT_AFU_MEM_RD_TRANS	0x0
#define VTD_EVNT_AFU_MEM_WR_TRANS	0x1
#define VTD_EVNT_AFU_DEVTLB_RD_HIT	0x2
#define VTD_EVNT_AFU_DEVTLB_WR_HIT	0x3
#define VTD_EVNT_DEVTLB_4K_FILL		0x4
#define VTD_EVNT_DEVTLB_2M_FILL		0x5
#define VTD_EVNT_DEVTLB_1G_FILL		0x6
#define VTD_EVNT_MAX			VTD_EVNT_DEVTLB_1G_FILL
#define VTD_CNTR			0x40
#define VTD_CNTR_EVNT_CNTR		GENMASK_ULL(47, 0)
#define VTD_CNTR_EVNT			GENMASK_ULL(63, 60)

#define VTD_SIP_CTRL			0x48
#define VTD_SIP_RESET_CNTR		BIT_ULL(0)
#define VTD_SIP_FREEZE_CNTR		BIT_ULL(8)
#define VTD_SIP_CTRL_EVNT		GENMASK_ULL(19, 16)
#define VTD_SIP_EVNT_IOTLB_4K_HIT	0x0
#define VTD_SIP_EVNT_IOTLB_2M_HIT	0x1
#define VTD_SIP_EVNT_IOTLB_1G_HIT	0x2
#define VTD_SIP_EVNT_SLPWC_L3_HIT	0x3
#define VTD_SIP_EVNT_SLPWC_L4_HIT	0x4
#define VTD_SIP_EVNT_RCC_HIT		0x5
#define VTD_SIP_EVNT_IOTLB_4K_MISS	0x6
#define VTD_SIP_EVNT_IOTLB_2M_MISS	0x7
#define VTD_SIP_EVNT_IOTLB_1G_MISS	0x8
#define VTD_SIP_EVNT_SLPWC_L3_MISS	0x9
#define VTD_SIP_EVNT_SLPWC_L4_MISS	0xa
#define VTD_SIP_EVNT_RCC_MISS		0xb
#define VTD_SIP_EVNT_MAX		VTD_SIP_EVNT_SLPWC_L4_MISS
#define VTD_SIP_CNTR			0x50
#define VTD_SIP_CNTR_EVNT_CNTR		GENMASK_ULL(47, 0)
#define VTD_SIP_CNTR_EVNT		GENMASK_ULL(63, 60)

#define PERF_TIMEOUT			30

#define PERF_MAX_PORT_NUM		1U

/**
 * struct fme_perf_priv - priv data structure for fme perf driver
 *
 * @dev: parent device.
 * @ioaddr: mapped base address of mmio region.
 * @pmu: pmu data structure for fme perf counters.
 * @id: id of this fme performance report private feature.
 * @fab_users: current user number on fabric counters.
 * @fab_port_id: used to indicate current working mode of fabric counters.
 * @fab_lock: lock to protect fabric counters working mode.
 * @cpu: active CPU to which the PMU is bound for accesses.
 * @node: node for CPU hotplug notifier link.
 * @cpuhp_state: state for CPU hotplug notification.
 */
struct fme_perf_priv {
	struct device *dev;
	void __iomem *ioaddr;
	struct pmu pmu;
	u16 id;

	u32 fab_users;
	u32 fab_port_id;
	spinlock_t fab_lock;

	unsigned int cpu;
	struct hlist_node node;
	enum cpuhp_state cpuhp_state;
};

/**
 * struct fme_perf_event_ops - callbacks for fme perf events
 *
 * @event_init: callback invoked during event init.
 * @event_destroy: callback invoked during event destroy.
 * @read_counter: callback to read hardware counters.
 */
struct fme_perf_event_ops {
	int (*event_init)(struct fme_perf_priv *priv, u32 event, u32 portid);
	void (*event_destroy)(struct fme_perf_priv *priv, u32 event,
			      u32 portid);
	u64 (*read_counter)(struct fme_perf_priv *priv, u32 event, u32 portid);
};

#define to_fme_perf_priv(_pmu)	container_of(_pmu, struct fme_perf_priv, pmu)

static ssize_t cpumask_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct pmu *pmu = dev_get_drvdata(dev);
	struct fme_perf_priv *priv;

	priv = to_fme_perf_priv(pmu);

	return cpumap_print_to_pagebuf(true, buf, cpumask_of(priv->cpu));
}
static DEVICE_ATTR_RO(cpumask);

static struct attribute *fme_perf_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group fme_perf_cpumask_group = {
	.attrs = fme_perf_cpumask_attrs,
};

#define FME_EVENT_MASK		GENMASK_ULL(11, 0)
#define FME_EVENT_SHIFT		0
#define FME_EVTYPE_MASK		GENMASK_ULL(15, 12)
#define FME_EVTYPE_SHIFT	12
#define FME_EVTYPE_BASIC	0
#define FME_EVTYPE_CACHE	1
#define FME_EVTYPE_FABRIC	2
#define FME_EVTYPE_VTD		3
#define FME_EVTYPE_VTD_SIP	4
#define FME_EVTYPE_MAX		FME_EVTYPE_VTD_SIP
#define FME_PORTID_MASK		GENMASK_ULL(23, 16)
#define FME_PORTID_SHIFT	16
#define FME_PORTID_ROOT		(0xffU)

#define get_event(_config)	FIELD_GET(FME_EVENT_MASK, _config)
#define get_evtype(_config)	FIELD_GET(FME_EVTYPE_MASK, _config)
#define get_portid(_config)	FIELD_GET(FME_PORTID_MASK, _config)
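
/*
 * Illustrative encoding: a fabric mmio_read event filtered to port 0 is
 * config = event | (evtype << 12) | (portid << 16)
 *        = 0x6 | (0x2 << 12) | (0x0 << 16) = 0x2006,
 * which is what the "event", "evtype" and "portid" format fields exported
 * below describe to the perf tool.
 */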

PMU_FORMAT_ATTR(event, "config:0-11");
PMU_FORMAT_ATTR(evtype, "config:12-15");
PMU_FORMAT_ATTR(portid, "config:16-23");

static struct attribute *fme_perf_format_attrs[] = {
	&format_attr_event.attr,
	&format_attr_evtype.attr,
	&format_attr_portid.attr,
	NULL,
};

static struct attribute_group fme_perf_format_group = {
	.name = "format",
	.attrs = fme_perf_format_attrs,
};

/*
 * There are no default events, but we need to create
 * "events" group (with empty attrs) before updating
 * it with detected events (using pmu->attr_update).
 */
static struct attribute *fme_perf_events_attrs_empty[] = {
	NULL,
};

static struct attribute_group fme_perf_events_group = {
	.name = "events",
	.attrs = fme_perf_events_attrs_empty,
};

static const struct attribute_group *fme_perf_groups[] = {
	&fme_perf_format_group,
	&fme_perf_cpumask_group,
	&fme_perf_events_group,
	NULL,
};

static bool is_portid_root(u32 portid)
{
	return portid == FME_PORTID_ROOT;
}

static bool is_portid_port(u32 portid)
{
	return portid < PERF_MAX_PORT_NUM;
}

static bool is_portid_root_or_port(u32 portid)
{
	return is_portid_root(portid) || is_portid_port(portid);
}

static u64 fme_read_perf_cntr_reg(void __iomem *addr)
{
	u32 low;
	u64 v;

	/*
	 * For 64-bit counter registers, the counter may increase and carry
	 * out of bit [31] between two 32-bit reads. So add extra reads to
	 * help prevent this issue. This only happens on platforms which
	 * don't support 64-bit reads - there readq() is split into two
	 * readl() calls, so a stale low word can end up paired with a newer
	 * high word (e.g. the low word wraps from 0xffffffff to 0 right
	 * after it was sampled).
	 */
	do {
		v = readq(addr);
		low = readl(addr);
	} while (((u32)v) > low);

	return v;
}

static int basic_event_init(struct fme_perf_priv *priv, u32 event, u32 portid)
{
	if (event <= BASIC_EVNT_MAX && is_portid_root(portid))
		return 0;

	return -EINVAL;
}

static u64 basic_read_event_counter(struct fme_perf_priv *priv,
				    u32 event, u32 portid)
{
	void __iomem *base = priv->ioaddr;

	return fme_read_perf_cntr_reg(base + CLK_CNTR);
}

static int cache_event_init(struct fme_perf_priv *priv, u32 event, u32 portid)
{
	if (priv->id == FME_FEATURE_ID_GLOBAL_IPERF &&
	    event <= CACHE_EVNT_MAX && is_portid_root(portid))
		return 0;

	return -EINVAL;
}

static u64 cache_read_event_counter(struct fme_perf_priv *priv,
				    u32 event, u32 portid)
{
	void __iomem *base = priv->ioaddr;
	u64 v, count;
	u8 channel;

	if (event == CACHE_EVNT_WR_HIT || event == CACHE_EVNT_WR_MISS ||
	    event == CACHE_EVNT_DATA_WR_PORT_CONTEN ||
	    event == CACHE_EVNT_TAG_WR_PORT_CONTEN)
		channel = CACHE_CHANNEL_WR;
	else
		channel = CACHE_CHANNEL_RD;

	/* set channel access type and cache event code. */
	v = readq(base + CACHE_CTRL);
	v &= ~(CACHE_CHANNEL_SEL | CACHE_CTRL_EVNT);
	v |= FIELD_PREP(CACHE_CHANNEL_SEL, channel);
	v |= FIELD_PREP(CACHE_CTRL_EVNT, event);
	writeq(v, base + CACHE_CTRL);

	if (readq_poll_timeout_atomic(base + CACHE_CNTR0, v,
				      FIELD_GET(CACHE_CNTR_EVNT, v) == event,
				      1, PERF_TIMEOUT)) {
		dev_err(priv->dev, "timeout, unmatched cache event code in counter register.\n");
		return 0;
	}

	v = fme_read_perf_cntr_reg(base + CACHE_CNTR0);
	count = FIELD_GET(CACHE_CNTR_EVNT_CNTR, v);
	v = fme_read_perf_cntr_reg(base + CACHE_CNTR1);
	count += FIELD_GET(CACHE_CNTR_EVNT_CNTR, v);

	return count;
}

static bool is_fabric_event_supported(struct fme_perf_priv *priv, u32 event,
				      u32 portid)
{
	if (event > FAB_EVNT_MAX || !is_portid_root_or_port(portid))
		return false;

	if (priv->id == FME_FEATURE_ID_GLOBAL_DPERF &&
	    (event == FAB_EVNT_PCIE1_RD || event == FAB_EVNT_UPI_RD ||
	     event == FAB_EVNT_PCIE1_WR || event == FAB_EVNT_UPI_WR))
		return false;

	return true;
}
static int fabric_event_init(struct fme_perf_priv *priv, u32 event, u32 portid)
{
	void __iomem *base = priv->ioaddr;
	int ret = 0;
	u64 v;

	if (!is_fabric_event_supported(priv, event, portid))
		return -EINVAL;

	/*
	 * The fabric counter set can only be in either overall or port mode
	 * at a time. In overall mode, it counts overall data for the FPGA;
	 * in port mode, it is configured to monitor one individual port.
	 *
	 * So every time a new event is initialized, the driver checks the
	 * current working mode and whether someone else is using this
	 * counter set.
	 */
	spin_lock(&priv->fab_lock);
	if (priv->fab_users && priv->fab_port_id != portid) {
		dev_dbg(priv->dev, "conflict fabric event monitoring mode.\n");
		ret = -EOPNOTSUPP;
		goto exit;
	}

	priv->fab_users++;

	/*
	 * Skip if the current working mode matches, otherwise change the
	 * working mode per the input port_id, to monitor overall data or
	 * another port.
	 */
	if (priv->fab_port_id == portid)
		goto exit;

	priv->fab_port_id = portid;

	v = readq(base + FAB_CTRL);
	v &= ~(FAB_PORT_FILTER | FAB_PORT_ID);

	if (is_portid_root(portid)) {
		v |= FIELD_PREP(FAB_PORT_FILTER, FAB_PORT_FILTER_DISABLE);
	} else {
		v |= FIELD_PREP(FAB_PORT_FILTER, FAB_PORT_FILTER_ENABLE);
		v |= FIELD_PREP(FAB_PORT_ID, portid);
	}
	writeq(v, base + FAB_CTRL);

exit:
	spin_unlock(&priv->fab_lock);
	return ret;
}
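
/*
 * Note on the mode check above: because there is a single fabric counter
 * set, counting a device-level fabric event (portid 0xff) together with a
 * port-filtered one (e.g. portid 0) is rejected with -EOPNOTSUPP until the
 * existing users of the other mode go away.
 */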

static void fabric_event_destroy(struct fme_perf_priv *priv, u32 event,
				 u32 portid)
{
	spin_lock(&priv->fab_lock);
	priv->fab_users--;
	spin_unlock(&priv->fab_lock);
}

static u64 fabric_read_event_counter(struct fme_perf_priv *priv, u32 event,
				     u32 portid)
{
	void __iomem *base = priv->ioaddr;
	u64 v;

	v = readq(base + FAB_CTRL);
	v &= ~FAB_CTRL_EVNT;
	v |= FIELD_PREP(FAB_CTRL_EVNT, event);
	writeq(v, base + FAB_CTRL);

	if (readq_poll_timeout_atomic(base + FAB_CNTR, v,
				      FIELD_GET(FAB_CNTR_EVNT, v) == event,
				      1, PERF_TIMEOUT)) {
		dev_err(priv->dev, "timeout, unmatched fab event code in counter register.\n");
		return 0;
	}

	v = fme_read_perf_cntr_reg(base + FAB_CNTR);
	return FIELD_GET(FAB_CNTR_EVNT_CNTR, v);
}

static int vtd_event_init(struct fme_perf_priv *priv, u32 event, u32 portid)
{
	if (priv->id == FME_FEATURE_ID_GLOBAL_IPERF &&
	    event <= VTD_EVNT_MAX && is_portid_port(portid))
		return 0;

	return -EINVAL;
}

static u64 vtd_read_event_counter(struct fme_perf_priv *priv, u32 event,
				  u32 portid)
{
	void __iomem *base = priv->ioaddr;
	u64 v;

	event += (portid * (VTD_EVNT_MAX + 1));

	v = readq(base + VTD_CTRL);
	v &= ~VTD_CTRL_EVNT;
	v |= FIELD_PREP(VTD_CTRL_EVNT, event);
	writeq(v, base + VTD_CTRL);

	if (readq_poll_timeout_atomic(base + VTD_CNTR, v,
				      FIELD_GET(VTD_CNTR_EVNT, v) == event,
				      1, PERF_TIMEOUT)) {
		dev_err(priv->dev, "timeout, unmatched vtd event code in counter register.\n");
		return 0;
	}

	v = fme_read_perf_cntr_reg(base + VTD_CNTR);
	return FIELD_GET(VTD_CNTR_EVNT_CNTR, v);
}

static int vtd_sip_event_init(struct fme_perf_priv *priv, u32 event, u32 portid)
{
	if (priv->id == FME_FEATURE_ID_GLOBAL_IPERF &&
	    event <= VTD_SIP_EVNT_MAX && is_portid_root(portid))
		return 0;

	return -EINVAL;
}

static u64 vtd_sip_read_event_counter(struct fme_perf_priv *priv, u32 event,
				      u32 portid)
{
	void __iomem *base = priv->ioaddr;
	u64 v;

	v = readq(base + VTD_SIP_CTRL);
	v &= ~VTD_SIP_CTRL_EVNT;
	v |= FIELD_PREP(VTD_SIP_CTRL_EVNT, event);
	writeq(v, base + VTD_SIP_CTRL);

	if (readq_poll_timeout_atomic(base + VTD_SIP_CNTR, v,
				      FIELD_GET(VTD_SIP_CNTR_EVNT, v) == event,
				      1, PERF_TIMEOUT)) {
		dev_err(priv->dev, "timeout, unmatched vtd sip event code in counter register\n");
		return 0;
	}

	v = fme_read_perf_cntr_reg(base + VTD_SIP_CNTR);
	return FIELD_GET(VTD_SIP_CNTR_EVNT_CNTR, v);
}

static struct fme_perf_event_ops fme_perf_event_ops[] = {
	[FME_EVTYPE_BASIC]	= {.event_init = basic_event_init,
				   .read_counter = basic_read_event_counter,},
	[FME_EVTYPE_CACHE]	= {.event_init = cache_event_init,
				   .read_counter = cache_read_event_counter,},
	[FME_EVTYPE_FABRIC]	= {.event_init = fabric_event_init,
				   .event_destroy = fabric_event_destroy,
				   .read_counter = fabric_read_event_counter,},
	[FME_EVTYPE_VTD]	= {.event_init = vtd_event_init,
				   .read_counter = vtd_read_event_counter,},
	[FME_EVTYPE_VTD_SIP]	= {.event_init = vtd_sip_event_init,
				   .read_counter = vtd_sip_read_event_counter,},
};

static ssize_t fme_perf_event_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct dev_ext_attribute *eattr;
	unsigned long config;
	char *ptr = buf;

	eattr = container_of(attr, struct dev_ext_attribute, attr);
	config = (unsigned long)eattr->var;

	ptr += sprintf(ptr, "event=0x%02x", (unsigned int)get_event(config));
	ptr += sprintf(ptr, ",evtype=0x%02x", (unsigned int)get_evtype(config));

	if (is_portid_root(get_portid(config)))
		ptr += sprintf(ptr, ",portid=0x%02x\n", FME_PORTID_ROOT);
	else
		ptr += sprintf(ptr, ",portid=?\n");

	return (ssize_t)(ptr - buf);
}
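
/*
 * Illustrative sysfs output from the show routine above: a device-level
 * fabric mmio_read event reads back as "event=0x06,evtype=0x02,portid=0xff",
 * while its port-filtered variant reads back as
 * "event=0x06,evtype=0x02,portid=?", leaving the port id as a user-supplied
 * parameter.
 */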

#define FME_EVENT_ATTR(_name) \
	__ATTR(_name, 0444, fme_perf_event_show, NULL)

#define FME_PORT_EVENT_CONFIG(_event, _type)				\
	(void *)((((_event) << FME_EVENT_SHIFT) & FME_EVENT_MASK) |	\
		(((_type) << FME_EVTYPE_SHIFT) & FME_EVTYPE_MASK))

#define FME_EVENT_CONFIG(_event, _type)					\
	(void *)((((_event) << FME_EVENT_SHIFT) & FME_EVENT_MASK) |	\
		(((_type) << FME_EVTYPE_SHIFT) & FME_EVTYPE_MASK) |	\
		(FME_PORTID_ROOT << FME_PORTID_SHIFT))
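
/*
 * FME_EVENT_CONFIG pre-sets the portid field to FME_PORTID_ROOT for
 * device-level events; FME_PORT_EVENT_CONFIG leaves it zero so the port id
 * must be supplied by the user (shown as "portid=?" in sysfs).
 */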
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) /* FME Perf Basic Events */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) #define FME_EVENT_BASIC(_name, _event) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) static struct dev_ext_attribute fme_perf_event_##_name = { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) .attr = FME_EVENT_ATTR(_name), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) .var = FME_EVENT_CONFIG(_event, FME_EVTYPE_BASIC), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) FME_EVENT_BASIC(clock, BASIC_EVNT_CLK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) static struct attribute *fme_perf_basic_events_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) &fme_perf_event_clock.attr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) static const struct attribute_group fme_perf_basic_events_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) .name = "events",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) .attrs = fme_perf_basic_events_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) /* FME Perf Cache Events */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) #define FME_EVENT_CACHE(_name, _event) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) static struct dev_ext_attribute fme_perf_event_cache_##_name = { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) .attr = FME_EVENT_ATTR(cache_##_name), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) .var = FME_EVENT_CONFIG(_event, FME_EVTYPE_CACHE), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) FME_EVENT_CACHE(read_hit, CACHE_EVNT_RD_HIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) FME_EVENT_CACHE(read_miss, CACHE_EVNT_RD_MISS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) FME_EVENT_CACHE(write_hit, CACHE_EVNT_WR_HIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) FME_EVENT_CACHE(write_miss, CACHE_EVNT_WR_MISS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) FME_EVENT_CACHE(hold_request, CACHE_EVNT_HOLD_REQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) FME_EVENT_CACHE(tx_req_stall, CACHE_EVNT_TX_REQ_STALL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) FME_EVENT_CACHE(rx_req_stall, CACHE_EVNT_RX_REQ_STALL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) FME_EVENT_CACHE(eviction, CACHE_EVNT_EVICTIONS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) FME_EVENT_CACHE(data_write_port_contention, CACHE_EVNT_DATA_WR_PORT_CONTEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) FME_EVENT_CACHE(tag_write_port_contention, CACHE_EVNT_TAG_WR_PORT_CONTEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) static struct attribute *fme_perf_cache_events_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) &fme_perf_event_cache_read_hit.attr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) &fme_perf_event_cache_read_miss.attr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) &fme_perf_event_cache_write_hit.attr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) &fme_perf_event_cache_write_miss.attr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) &fme_perf_event_cache_hold_request.attr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) &fme_perf_event_cache_tx_req_stall.attr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) &fme_perf_event_cache_rx_req_stall.attr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) &fme_perf_event_cache_eviction.attr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) &fme_perf_event_cache_data_write_port_contention.attr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) &fme_perf_event_cache_tag_write_port_contention.attr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) static umode_t fme_perf_events_visible(struct kobject *kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) struct attribute *attr, int n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) struct pmu *pmu = dev_get_drvdata(kobj_to_dev(kobj));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) struct fme_perf_priv *priv = to_fme_perf_priv(pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) return (priv->id == FME_FEATURE_ID_GLOBAL_IPERF) ? attr->mode : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) static const struct attribute_group fme_perf_cache_events_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) .name = "events",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) .attrs = fme_perf_cache_events_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) .is_visible = fme_perf_events_visible,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) /* FME Perf Fabric Events */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) #define FME_EVENT_FABRIC(_name, _event) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) static struct dev_ext_attribute fme_perf_event_fab_##_name = { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) .attr = FME_EVENT_ATTR(fab_##_name), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) .var = FME_EVENT_CONFIG(_event, FME_EVTYPE_FABRIC), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) #define FME_EVENT_FABRIC_PORT(_name, _event) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) static struct dev_ext_attribute fme_perf_event_fab_port_##_name = { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) .attr = FME_EVENT_ATTR(fab_port_##_name), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) .var = FME_PORT_EVENT_CONFIG(_event, FME_EVTYPE_FABRIC), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) FME_EVENT_FABRIC(pcie0_read, FAB_EVNT_PCIE0_RD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) FME_EVENT_FABRIC(pcie0_write, FAB_EVNT_PCIE0_WR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) FME_EVENT_FABRIC(pcie1_read, FAB_EVNT_PCIE1_RD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) FME_EVENT_FABRIC(pcie1_write, FAB_EVNT_PCIE1_WR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) FME_EVENT_FABRIC(upi_read, FAB_EVNT_UPI_RD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) FME_EVENT_FABRIC(upi_write, FAB_EVNT_UPI_WR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) FME_EVENT_FABRIC(mmio_read, FAB_EVNT_MMIO_RD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) FME_EVENT_FABRIC(mmio_write, FAB_EVNT_MMIO_WR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) FME_EVENT_FABRIC_PORT(pcie0_read, FAB_EVNT_PCIE0_RD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) FME_EVENT_FABRIC_PORT(pcie0_write, FAB_EVNT_PCIE0_WR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) FME_EVENT_FABRIC_PORT(pcie1_read, FAB_EVNT_PCIE1_RD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) FME_EVENT_FABRIC_PORT(pcie1_write, FAB_EVNT_PCIE1_WR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) FME_EVENT_FABRIC_PORT(upi_read, FAB_EVNT_UPI_RD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) FME_EVENT_FABRIC_PORT(upi_write, FAB_EVNT_UPI_WR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) FME_EVENT_FABRIC_PORT(mmio_read, FAB_EVNT_MMIO_RD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) FME_EVENT_FABRIC_PORT(mmio_write, FAB_EVNT_MMIO_WR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) static struct attribute *fme_perf_fabric_events_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) &fme_perf_event_fab_pcie0_read.attr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) &fme_perf_event_fab_pcie0_write.attr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) &fme_perf_event_fab_pcie1_read.attr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) &fme_perf_event_fab_pcie1_write.attr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) &fme_perf_event_fab_upi_read.attr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) &fme_perf_event_fab_upi_write.attr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) &fme_perf_event_fab_mmio_read.attr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) &fme_perf_event_fab_mmio_write.attr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) &fme_perf_event_fab_port_pcie0_read.attr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) &fme_perf_event_fab_port_pcie0_write.attr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) &fme_perf_event_fab_port_pcie1_read.attr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) &fme_perf_event_fab_port_pcie1_write.attr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) &fme_perf_event_fab_port_upi_read.attr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) &fme_perf_event_fab_port_upi_write.attr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) &fme_perf_event_fab_port_mmio_read.attr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) &fme_perf_event_fab_port_mmio_write.attr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) static umode_t fme_perf_fabric_events_visible(struct kobject *kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) struct attribute *attr, int n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) struct pmu *pmu = dev_get_drvdata(kobj_to_dev(kobj));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) struct fme_perf_priv *priv = to_fme_perf_priv(pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) struct dev_ext_attribute *eattr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) unsigned long var;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) eattr = container_of(attr, struct dev_ext_attribute, attr.attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) var = (unsigned long)eattr->var;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) if (is_fabric_event_supported(priv, get_event(var), get_portid(var)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) return attr->mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) static const struct attribute_group fme_perf_fabric_events_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) .name = "events",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) .attrs = fme_perf_fabric_events_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) .is_visible = fme_perf_fabric_events_visible,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) /* FME Perf VTD Events */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) #define FME_EVENT_VTD_PORT(_name, _event) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) static struct dev_ext_attribute fme_perf_event_vtd_port_##_name = { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) .attr = FME_EVENT_ATTR(vtd_port_##_name), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) .var = FME_PORT_EVENT_CONFIG(_event, FME_EVTYPE_VTD), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) FME_EVENT_VTD_PORT(read_transaction, VTD_EVNT_AFU_MEM_RD_TRANS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) FME_EVENT_VTD_PORT(write_transaction, VTD_EVNT_AFU_MEM_WR_TRANS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) FME_EVENT_VTD_PORT(devtlb_read_hit, VTD_EVNT_AFU_DEVTLB_RD_HIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) FME_EVENT_VTD_PORT(devtlb_write_hit, VTD_EVNT_AFU_DEVTLB_WR_HIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) FME_EVENT_VTD_PORT(devtlb_4k_fill, VTD_EVNT_DEVTLB_4K_FILL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) FME_EVENT_VTD_PORT(devtlb_2m_fill, VTD_EVNT_DEVTLB_2M_FILL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) FME_EVENT_VTD_PORT(devtlb_1g_fill, VTD_EVNT_DEVTLB_1G_FILL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) static struct attribute *fme_perf_vtd_events_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) &fme_perf_event_vtd_port_read_transaction.attr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) &fme_perf_event_vtd_port_write_transaction.attr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) &fme_perf_event_vtd_port_devtlb_read_hit.attr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) &fme_perf_event_vtd_port_devtlb_write_hit.attr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) &fme_perf_event_vtd_port_devtlb_4k_fill.attr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) &fme_perf_event_vtd_port_devtlb_2m_fill.attr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) &fme_perf_event_vtd_port_devtlb_1g_fill.attr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) static const struct attribute_group fme_perf_vtd_events_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) .name = "events",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) .attrs = fme_perf_vtd_events_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) .is_visible = fme_perf_events_visible,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) /* FME Perf VTD SIP Events */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) #define FME_EVENT_VTD_SIP(_name, _event) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) static struct dev_ext_attribute fme_perf_event_vtd_sip_##_name = { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) .attr = FME_EVENT_ATTR(vtd_sip_##_name), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) .var = FME_EVENT_CONFIG(_event, FME_EVTYPE_VTD_SIP), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) FME_EVENT_VTD_SIP(iotlb_4k_hit, VTD_SIP_EVNT_IOTLB_4K_HIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) FME_EVENT_VTD_SIP(iotlb_2m_hit, VTD_SIP_EVNT_IOTLB_2M_HIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) FME_EVENT_VTD_SIP(iotlb_1g_hit, VTD_SIP_EVNT_IOTLB_1G_HIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) FME_EVENT_VTD_SIP(slpwc_l3_hit, VTD_SIP_EVNT_SLPWC_L3_HIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) FME_EVENT_VTD_SIP(slpwc_l4_hit, VTD_SIP_EVNT_SLPWC_L4_HIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) FME_EVENT_VTD_SIP(rcc_hit, VTD_SIP_EVNT_RCC_HIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) FME_EVENT_VTD_SIP(iotlb_4k_miss, VTD_SIP_EVNT_IOTLB_4K_MISS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) FME_EVENT_VTD_SIP(iotlb_2m_miss, VTD_SIP_EVNT_IOTLB_2M_MISS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) FME_EVENT_VTD_SIP(iotlb_1g_miss, VTD_SIP_EVNT_IOTLB_1G_MISS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) FME_EVENT_VTD_SIP(slpwc_l3_miss, VTD_SIP_EVNT_SLPWC_L3_MISS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) FME_EVENT_VTD_SIP(slpwc_l4_miss, VTD_SIP_EVNT_SLPWC_L4_MISS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) FME_EVENT_VTD_SIP(rcc_miss, VTD_SIP_EVNT_RCC_MISS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) static struct attribute *fme_perf_vtd_sip_events_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) &fme_perf_event_vtd_sip_iotlb_4k_hit.attr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) &fme_perf_event_vtd_sip_iotlb_2m_hit.attr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) &fme_perf_event_vtd_sip_iotlb_1g_hit.attr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) &fme_perf_event_vtd_sip_slpwc_l3_hit.attr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) &fme_perf_event_vtd_sip_slpwc_l4_hit.attr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) &fme_perf_event_vtd_sip_rcc_hit.attr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) &fme_perf_event_vtd_sip_iotlb_4k_miss.attr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) &fme_perf_event_vtd_sip_iotlb_2m_miss.attr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) &fme_perf_event_vtd_sip_iotlb_1g_miss.attr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) &fme_perf_event_vtd_sip_slpwc_l3_miss.attr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) &fme_perf_event_vtd_sip_slpwc_l4_miss.attr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) &fme_perf_event_vtd_sip_rcc_miss.attr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) static const struct attribute_group fme_perf_vtd_sip_events_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) .name = "events",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) .attrs = fme_perf_vtd_sip_events_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) .is_visible = fme_perf_events_visible,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) static const struct attribute_group *fme_perf_events_groups[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) &fme_perf_basic_events_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) &fme_perf_cache_events_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) &fme_perf_fabric_events_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) &fme_perf_vtd_events_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) &fme_perf_vtd_sip_events_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) };
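/*
 * All of the per-type groups above share the group name "events" and are
 * gated by fme_perf_events_visible(), so only the event sets supported by
 * the hardware at hand end up being exposed through the PMU's sysfs
 * "events" directory.
 */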
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) static struct fme_perf_event_ops *get_event_ops(u32 evtype)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) if (evtype > FME_EVTYPE_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) return &fme_perf_event_ops[evtype];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785)
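/*
 * fme_perf_event_ops[] is indexed by event type.  fme_perf_event_init()
 * validates the type before stashing it in hw.event_base, so the lookups
 * done by the callbacks below never return NULL for an initialized event.
 */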
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) static void fme_perf_event_destroy(struct perf_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) struct fme_perf_event_ops *ops = get_event_ops(event->hw.event_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) struct fme_perf_priv *priv = to_fme_perf_priv(event->pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) if (ops->event_destroy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) ops->event_destroy(priv, event->hw.idx, event->hw.config_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) static int fme_perf_event_init(struct perf_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) struct fme_perf_priv *priv = to_fme_perf_priv(event->pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) struct hw_perf_event *hwc = &event->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) struct fme_perf_event_ops *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) u32 eventid, evtype, portid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801)
	/* reject events of other types handed to us during PMU enumeration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) if (event->attr.type != event->pmu->type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805)
	/*
	 * FME counters are shared across all cores, so per-task
	 * (per-process) counting is not supported.  Event sampling
	 * is not supported either.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) if (event->cpu < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) if (event->cpu != priv->cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) eventid = get_event(event->attr.config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) portid = get_portid(event->attr.config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) evtype = get_evtype(event->attr.config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) if (evtype > FME_EVTYPE_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
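	/*
	 * Stash the decoded fields in otherwise unused hw_perf_event members:
	 * idx carries the event ID, event_base the event type and config_base
	 * the port filter, for later use by the per-type ops callbacks.
	 */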
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) hwc->event_base = evtype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) hwc->idx = (int)eventid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) hwc->config_base = portid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) event->destroy = fme_perf_event_destroy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
	dev_dbg(priv->dev, "%s event=0x%x, evtype=0x%x, portid=0x%x\n",
		__func__, eventid, evtype, portid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) ops = get_event_ops(evtype);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) if (ops->event_init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) return ops->event_init(priv, eventid, portid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) static void fme_perf_event_update(struct perf_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) struct fme_perf_event_ops *ops = get_event_ops(event->hw.event_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) struct fme_perf_priv *priv = to_fme_perf_priv(event->pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) struct hw_perf_event *hwc = &event->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) u64 now, prev, delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) now = ops->read_counter(priv, (u32)hwc->idx, hwc->config_base);
	prev = local64_read(&hwc->prev_count);
	delta = now - prev;

	/* advance prev_count so that repeated updates only add the new delta */
	local64_set(&hwc->prev_count, now);
	local64_add(delta, &event->count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
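/*
 * The hardware counters are not armed or disarmed per event: start() only
 * snapshots the current counter value into prev_count, while stop() and
 * read() fold the delta accumulated since the previous snapshot into the
 * event count.
 */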
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) static void fme_perf_event_start(struct perf_event *event, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) struct fme_perf_event_ops *ops = get_event_ops(event->hw.event_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) struct fme_perf_priv *priv = to_fme_perf_priv(event->pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) struct hw_perf_event *hwc = &event->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) u64 count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) count = ops->read_counter(priv, (u32)hwc->idx, hwc->config_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) local64_set(&hwc->prev_count, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) static void fme_perf_event_stop(struct perf_event *event, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) fme_perf_event_update(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) static int fme_perf_event_add(struct perf_event *event, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) if (flags & PERF_EF_START)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) fme_perf_event_start(event, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) static void fme_perf_event_del(struct perf_event *event, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) fme_perf_event_stop(event, PERF_EF_UPDATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) static void fme_perf_event_read(struct perf_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) fme_perf_event_update(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) static void fme_perf_setup_hardware(struct fme_perf_priv *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) void __iomem *base = priv->ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) u64 v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) /* read and save current working mode for fabric counters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) v = readq(base + FAB_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
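	/*
	 * When port filtering is disabled the fabric counters aggregate
	 * traffic from all ports, which is recorded as FME_PORTID_ROOT here;
	 * otherwise remember which port the filter currently selects.
	 */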
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) if (FIELD_GET(FAB_PORT_FILTER, v) == FAB_PORT_FILTER_DISABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) priv->fab_port_id = FME_PORTID_ROOT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) priv->fab_port_id = FIELD_GET(FAB_PORT_ID, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) static int fme_perf_pmu_register(struct platform_device *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) struct fme_perf_priv *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) struct pmu *pmu = &priv->pmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) spin_lock_init(&priv->fab_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) fme_perf_setup_hardware(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) pmu->task_ctx_nr = perf_invalid_context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) pmu->attr_groups = fme_perf_groups;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) pmu->attr_update = fme_perf_events_groups;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) pmu->event_init = fme_perf_event_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) pmu->add = fme_perf_event_add;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) pmu->del = fme_perf_event_del;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) pmu->start = fme_perf_event_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) pmu->stop = fme_perf_event_stop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) pmu->read = fme_perf_event_read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) pmu->capabilities = PERF_PMU_CAP_NO_INTERRUPT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) PERF_PMU_CAP_NO_EXCLUDE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926)
	name = devm_kasprintf(priv->dev, GFP_KERNEL, "dfl_fme%d", pdev->id);
	if (!name)
		return -ENOMEM;

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) ret = perf_pmu_register(pmu, name, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) }
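/*
 * Once registered, the PMU appears under /sys/bus/event_source/devices/ using
 * the "dfl_fme%d" name built above.  As an illustration only (assuming this
 * instance came up as dfl_fme0 and the event attribute is visible on the
 * hardware at hand), one of the counters defined in this file could be read
 * system-wide with:
 *
 *	perf stat -a -e dfl_fme0/vtd_sip_iotlb_4k_hit/ -- sleep 1
 */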
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) static void fme_perf_pmu_unregister(struct fme_perf_priv *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) perf_pmu_unregister(&priv->pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
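/*
 * CPU hotplug callback: if the CPU this PMU instance is bound to goes
 * offline, pick any other online CPU, update priv->cpu and migrate the
 * active perf context over to it.
 */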
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) static int fme_perf_offline_cpu(unsigned int cpu, struct hlist_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) struct fme_perf_priv *priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) int target;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) priv = hlist_entry_safe(node, struct fme_perf_priv, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) if (cpu != priv->cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) target = cpumask_any_but(cpu_online_mask, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) if (target >= nr_cpu_ids)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) priv->cpu = target;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) perf_pmu_migrate_context(&priv->pmu, cpu, target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
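/*
 * Feature init: allocate the private context, set up a dynamic CPU hotplug
 * state, add this instance to it and finally register the PMU; the error
 * path unwinds these steps in reverse order.
 */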
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) static int fme_perf_init(struct platform_device *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) struct dfl_feature *feature)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) struct fme_perf_priv *priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) if (!priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) priv->dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) priv->ioaddr = feature->ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) priv->id = feature->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) priv->cpu = raw_smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) "perf/fpga/dfl_fme:online",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) NULL, fme_perf_offline_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) priv->cpuhp_state = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) /* Register the pmu instance for cpu hotplug */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) ret = cpuhp_state_add_instance_nocalls(priv->cpuhp_state, &priv->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) goto cpuhp_instance_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) ret = fme_perf_pmu_register(pdev, priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) goto pmu_register_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) feature->priv = priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) pmu_register_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) cpuhp_state_remove_instance_nocalls(priv->cpuhp_state, &priv->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) cpuhp_instance_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) cpuhp_remove_multi_state(priv->cpuhp_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) static void fme_perf_uinit(struct platform_device *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) struct dfl_feature *feature)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) struct fme_perf_priv *priv = feature->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) fme_perf_pmu_unregister(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) cpuhp_state_remove_instance_nocalls(priv->cpuhp_state, &priv->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) cpuhp_remove_multi_state(priv->cpuhp_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) const struct dfl_feature_id fme_perf_id_table[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) {.id = FME_FEATURE_ID_GLOBAL_IPERF,},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) {.id = FME_FEATURE_ID_GLOBAL_DPERF,},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) {0,}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) const struct dfl_feature_ops fme_perf_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) .init = fme_perf_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) .uinit = fme_perf_uinit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) };
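/*
 * Illustration only: the FME platform driver is expected to hook the tables
 * above into its feature driver list, roughly along these lines (the list
 * below is a sketch, not the actual contents of dfl-fme-main.c):
 *
 *	static struct dfl_feature_driver fme_feature_drvs[] = {
 *		{
 *			.id_table = fme_perf_id_table,
 *			.ops = &fme_perf_ops,
 *		},
 *		...
 *	};
 */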