/*
 * Netburst Performance Events (P4, old Xeon)
 *
 *  Copyright (C) 2010 Parallels, Inc., Cyrill Gorcunov <gorcunov@openvz.org>
 *  Copyright (C) 2010 Intel Corporation, Lin Ming <ming.m.lin@intel.com>
 *
 *  For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>

#include <asm/perf_event_p4.h>
#include <asm/hardirq.h>
#include <asm/apic.h>

#include "../perf_event.h"
#define P4_CNTR_LIMIT 3
/*
 * array indices: 0,1 - HT threads, used on an HT-enabled CPU
 */
struct p4_event_bind {
	unsigned int opcode;		/* Event code and ESCR selector */
	unsigned int escr_msr[2];	/* ESCR MSR for this event */
	unsigned int escr_emask;	/* valid ESCR EventMask bits */
	unsigned int shared;		/* event is shared across threads */
	char cntr[2][P4_CNTR_LIMIT];	/* counter index (offset), -1 on absence */
};
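
/*
 * For illustration only: with an entry bound as
 *
 *	.cntr = { {4, 5, -1}, {6, 7, -1} },
 *
 * the event may use counter 4 or 5 when it runs on HT thread 0 and
 * counter 6 or 7 on HT thread 1; -1 marks an absent alternative.
 */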

struct p4_pebs_bind {
	unsigned int metric_pebs;
	unsigned int metric_vert;
};

/* it sets P4_PEBS_ENABLE_UOP_TAG as well */
#define P4_GEN_PEBS_BIND(name, pebs, vert)			\
	[P4_PEBS_METRIC__##name] = {				\
		.metric_pebs = pebs | P4_PEBS_ENABLE_UOP_TAG,	\
		.metric_vert = vert,				\
	}

/*
 * Note that P4_PEBS_ENABLE_UOP_TAG is always set here.
 *
 * It is needed to map the P4_PEBS_CONFIG_METRIC_MASK bits of an
 * event configuration onto the values which are to be written
 * into the MSR_IA32_PEBS_ENABLE and MSR_P4_PEBS_MATRIX_VERT
 * registers.
 */
static struct p4_pebs_bind p4_pebs_bind_map[] = {
	P4_GEN_PEBS_BIND(1stl_cache_load_miss_retired,	0x0000001, 0x0000001),
	P4_GEN_PEBS_BIND(2ndl_cache_load_miss_retired,	0x0000002, 0x0000001),
	P4_GEN_PEBS_BIND(dtlb_load_miss_retired,	0x0000004, 0x0000001),
	P4_GEN_PEBS_BIND(dtlb_store_miss_retired,	0x0000004, 0x0000002),
	P4_GEN_PEBS_BIND(dtlb_all_miss_retired,		0x0000004, 0x0000003),
	P4_GEN_PEBS_BIND(tagged_mispred_branch,		0x0018000, 0x0000010),
	P4_GEN_PEBS_BIND(mob_load_replay_retired,	0x0000200, 0x0000001),
	P4_GEN_PEBS_BIND(split_load_retired,		0x0000400, 0x0000001),
	P4_GEN_PEBS_BIND(split_store_retired,		0x0000400, 0x0000002),
};
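
/*
 * A minimal sketch of how this table is consumed (illustrative only;
 * p4_config_unpack_metric() is assumed to be the helper from
 * perf_event_p4.h that extracts the metric index from a config):
 *
 *	unsigned int idx = p4_config_unpack_metric(hwc->config);
 *
 *	if (idx != P4_PEBS_METRIC__none) {
 *		wrmsrl(MSR_IA32_PEBS_ENABLE, p4_pebs_bind_map[idx].metric_pebs);
 *		wrmsrl(MSR_P4_PEBS_MATRIX_VERT, p4_pebs_bind_map[idx].metric_vert);
 *	}
 */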

/*
 * Note that we don't use CCCR1 here: there is an exception for
 * P4_BSQ_ALLOCATION, but we simply have no workaround for it.
 *
 * Consider this binding as the resources which a particular event
 * may borrow; it doesn't contain EventMask, Tags and friends --
 * they are left to the caller.
 */
static struct p4_event_bind p4_event_bind_map[] = {
	[P4_EVENT_TC_DELIVER_MODE] = {
		.opcode = P4_OPCODE(P4_EVENT_TC_DELIVER_MODE),
		.escr_msr = { MSR_P4_TC_ESCR0, MSR_P4_TC_ESCR1 },
		.escr_emask =
			P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, DD) |
			P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, DB) |
			P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, DI) |
			P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, BD) |
			P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, BB) |
			P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, BI) |
			P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, ID),
		.shared = 1,
		.cntr = { {4, 5, -1}, {6, 7, -1} },
	},
	[P4_EVENT_BPU_FETCH_REQUEST] = {
		.opcode = P4_OPCODE(P4_EVENT_BPU_FETCH_REQUEST),
		.escr_msr = { MSR_P4_BPU_ESCR0, MSR_P4_BPU_ESCR1 },
		.escr_emask =
			P4_ESCR_EMASK_BIT(P4_EVENT_BPU_FETCH_REQUEST, TCMISS),
		.cntr = { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_ITLB_REFERENCE] = {
		.opcode = P4_OPCODE(P4_EVENT_ITLB_REFERENCE),
		.escr_msr = { MSR_P4_ITLB_ESCR0, MSR_P4_ITLB_ESCR1 },
		.escr_emask =
			P4_ESCR_EMASK_BIT(P4_EVENT_ITLB_REFERENCE, HIT) |
			P4_ESCR_EMASK_BIT(P4_EVENT_ITLB_REFERENCE, MISS) |
			P4_ESCR_EMASK_BIT(P4_EVENT_ITLB_REFERENCE, HIT_UK),
		.cntr = { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_MEMORY_CANCEL] = {
		.opcode = P4_OPCODE(P4_EVENT_MEMORY_CANCEL),
		.escr_msr = { MSR_P4_DAC_ESCR0, MSR_P4_DAC_ESCR1 },
		.escr_emask =
			P4_ESCR_EMASK_BIT(P4_EVENT_MEMORY_CANCEL, ST_RB_FULL) |
			P4_ESCR_EMASK_BIT(P4_EVENT_MEMORY_CANCEL, 64K_CONF),
		.cntr = { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_MEMORY_COMPLETE] = {
		.opcode = P4_OPCODE(P4_EVENT_MEMORY_COMPLETE),
		.escr_msr = { MSR_P4_SAAT_ESCR0, MSR_P4_SAAT_ESCR1 },
		.escr_emask =
			P4_ESCR_EMASK_BIT(P4_EVENT_MEMORY_COMPLETE, LSC) |
			P4_ESCR_EMASK_BIT(P4_EVENT_MEMORY_COMPLETE, SSC),
		.cntr = { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_LOAD_PORT_REPLAY] = {
		.opcode = P4_OPCODE(P4_EVENT_LOAD_PORT_REPLAY),
		.escr_msr = { MSR_P4_SAAT_ESCR0, MSR_P4_SAAT_ESCR1 },
		.escr_emask =
			P4_ESCR_EMASK_BIT(P4_EVENT_LOAD_PORT_REPLAY, SPLIT_LD),
		.cntr = { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_STORE_PORT_REPLAY] = {
		.opcode = P4_OPCODE(P4_EVENT_STORE_PORT_REPLAY),
		.escr_msr = { MSR_P4_SAAT_ESCR0, MSR_P4_SAAT_ESCR1 },
		.escr_emask =
			P4_ESCR_EMASK_BIT(P4_EVENT_STORE_PORT_REPLAY, SPLIT_ST),
		.cntr = { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_MOB_LOAD_REPLAY] = {
		.opcode = P4_OPCODE(P4_EVENT_MOB_LOAD_REPLAY),
		.escr_msr = { MSR_P4_MOB_ESCR0, MSR_P4_MOB_ESCR1 },
		.escr_emask =
			P4_ESCR_EMASK_BIT(P4_EVENT_MOB_LOAD_REPLAY, NO_STA) |
			P4_ESCR_EMASK_BIT(P4_EVENT_MOB_LOAD_REPLAY, NO_STD) |
			P4_ESCR_EMASK_BIT(P4_EVENT_MOB_LOAD_REPLAY, PARTIAL_DATA) |
			P4_ESCR_EMASK_BIT(P4_EVENT_MOB_LOAD_REPLAY, UNALGN_ADDR),
		.cntr = { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_PAGE_WALK_TYPE] = {
		.opcode = P4_OPCODE(P4_EVENT_PAGE_WALK_TYPE),
		.escr_msr = { MSR_P4_PMH_ESCR0, MSR_P4_PMH_ESCR1 },
		.escr_emask =
			P4_ESCR_EMASK_BIT(P4_EVENT_PAGE_WALK_TYPE, DTMISS) |
			P4_ESCR_EMASK_BIT(P4_EVENT_PAGE_WALK_TYPE, ITMISS),
		.shared = 1,
		.cntr = { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_BSQ_CACHE_REFERENCE] = {
		.opcode = P4_OPCODE(P4_EVENT_BSQ_CACHE_REFERENCE),
		.escr_msr = { MSR_P4_BSU_ESCR0, MSR_P4_BSU_ESCR1 },
		.escr_emask =
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITS) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITE) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITM) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITS) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITE) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITM) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_MISS) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_MISS) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, WR_2ndL_MISS),
		.cntr = { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_IOQ_ALLOCATION] = {
		.opcode = P4_OPCODE(P4_EVENT_IOQ_ALLOCATION),
		.escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
		.escr_emask =
			P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, DEFAULT) |
			P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, ALL_READ) |
			P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, ALL_WRITE) |
			P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, MEM_UC) |
			P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, MEM_WC) |
			P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, MEM_WT) |
			P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, MEM_WP) |
			P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, MEM_WB) |
			P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, OWN) |
			P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, OTHER) |
			P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, PREFETCH),
		.cntr = { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_IOQ_ACTIVE_ENTRIES] = {	/* shared ESCR */
		.opcode = P4_OPCODE(P4_EVENT_IOQ_ACTIVE_ENTRIES),
		.escr_msr = { MSR_P4_FSB_ESCR1, MSR_P4_FSB_ESCR1 },
		.escr_emask =
			P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, DEFAULT) |
			P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, ALL_READ) |
			P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, ALL_WRITE) |
			P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_UC) |
			P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WC) |
			P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WT) |
			P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WP) |
			P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WB) |
			P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, OWN) |
			P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, OTHER) |
			P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, PREFETCH),
		.cntr = { {2, -1, -1}, {3, -1, -1} },
	},
	[P4_EVENT_FSB_DATA_ACTIVITY] = {
		.opcode = P4_OPCODE(P4_EVENT_FSB_DATA_ACTIVITY),
		.escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
		.escr_emask =
			P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_DRV) |
			P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_OWN) |
			P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_OTHER) |
			P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DBSY_DRV) |
			P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DBSY_OWN) |
			P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DBSY_OTHER),
		.shared = 1,
		.cntr = { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_BSQ_ALLOCATION] = {	/* shared ESCR, broken CCCR1 */
		.opcode = P4_OPCODE(P4_EVENT_BSQ_ALLOCATION),
		.escr_msr = { MSR_P4_BSU_ESCR0, MSR_P4_BSU_ESCR0 },
		.escr_emask =
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_TYPE0) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_TYPE1) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_LEN0) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_LEN1) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_IO_TYPE) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_LOCK_TYPE) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_CACHE_TYPE) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_SPLIT_TYPE) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_DEM_TYPE) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_ORD_TYPE) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, MEM_TYPE0) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, MEM_TYPE1) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, MEM_TYPE2),
		.cntr = { {0, -1, -1}, {1, -1, -1} },
	},
	[P4_EVENT_BSQ_ACTIVE_ENTRIES] = {	/* shared ESCR */
		.opcode = P4_OPCODE(P4_EVENT_BSQ_ACTIVE_ENTRIES),
		.escr_msr = { MSR_P4_BSU_ESCR1, MSR_P4_BSU_ESCR1 },
		.escr_emask =
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_TYPE0) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_TYPE1) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_LEN0) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_LEN1) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_IO_TYPE) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_LOCK_TYPE) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_CACHE_TYPE) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_SPLIT_TYPE) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_DEM_TYPE) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_ORD_TYPE) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, MEM_TYPE0) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, MEM_TYPE1) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, MEM_TYPE2),
		.cntr = { {2, -1, -1}, {3, -1, -1} },
	},
	[P4_EVENT_SSE_INPUT_ASSIST] = {
		.opcode = P4_OPCODE(P4_EVENT_SSE_INPUT_ASSIST),
		.escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
		.escr_emask =
			P4_ESCR_EMASK_BIT(P4_EVENT_SSE_INPUT_ASSIST, ALL),
		.shared = 1,
		.cntr = { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_PACKED_SP_UOP] = {
		.opcode = P4_OPCODE(P4_EVENT_PACKED_SP_UOP),
		.escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
		.escr_emask =
			P4_ESCR_EMASK_BIT(P4_EVENT_PACKED_SP_UOP, ALL),
		.shared = 1,
		.cntr = { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_PACKED_DP_UOP] = {
		.opcode = P4_OPCODE(P4_EVENT_PACKED_DP_UOP),
		.escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
		.escr_emask =
			P4_ESCR_EMASK_BIT(P4_EVENT_PACKED_DP_UOP, ALL),
		.shared = 1,
		.cntr = { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_SCALAR_SP_UOP] = {
		.opcode = P4_OPCODE(P4_EVENT_SCALAR_SP_UOP),
		.escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
		.escr_emask =
			P4_ESCR_EMASK_BIT(P4_EVENT_SCALAR_SP_UOP, ALL),
		.shared = 1,
		.cntr = { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_SCALAR_DP_UOP] = {
		.opcode = P4_OPCODE(P4_EVENT_SCALAR_DP_UOP),
		.escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
		.escr_emask =
			P4_ESCR_EMASK_BIT(P4_EVENT_SCALAR_DP_UOP, ALL),
		.shared = 1,
		.cntr = { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_64BIT_MMX_UOP] = {
		.opcode = P4_OPCODE(P4_EVENT_64BIT_MMX_UOP),
		.escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
		.escr_emask =
			P4_ESCR_EMASK_BIT(P4_EVENT_64BIT_MMX_UOP, ALL),
		.shared = 1,
		.cntr = { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_128BIT_MMX_UOP] = {
		.opcode = P4_OPCODE(P4_EVENT_128BIT_MMX_UOP),
		.escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
		.escr_emask =
			P4_ESCR_EMASK_BIT(P4_EVENT_128BIT_MMX_UOP, ALL),
		.shared = 1,
		.cntr = { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_X87_FP_UOP] = {
		.opcode = P4_OPCODE(P4_EVENT_X87_FP_UOP),
		.escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
		.escr_emask =
			P4_ESCR_EMASK_BIT(P4_EVENT_X87_FP_UOP, ALL),
		.shared = 1,
		.cntr = { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_TC_MISC] = {
		.opcode = P4_OPCODE(P4_EVENT_TC_MISC),
		.escr_msr = { MSR_P4_TC_ESCR0, MSR_P4_TC_ESCR1 },
		.escr_emask =
			P4_ESCR_EMASK_BIT(P4_EVENT_TC_MISC, FLUSH),
		.cntr = { {4, 5, -1}, {6, 7, -1} },
	},
	[P4_EVENT_GLOBAL_POWER_EVENTS] = {
		.opcode = P4_OPCODE(P4_EVENT_GLOBAL_POWER_EVENTS),
		.escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
		.escr_emask =
			P4_ESCR_EMASK_BIT(P4_EVENT_GLOBAL_POWER_EVENTS, RUNNING),
		.cntr = { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_TC_MS_XFER] = {
		.opcode = P4_OPCODE(P4_EVENT_TC_MS_XFER),
		.escr_msr = { MSR_P4_MS_ESCR0, MSR_P4_MS_ESCR1 },
		.escr_emask =
			P4_ESCR_EMASK_BIT(P4_EVENT_TC_MS_XFER, CISC),
		.cntr = { {4, 5, -1}, {6, 7, -1} },
	},
	[P4_EVENT_UOP_QUEUE_WRITES] = {
		.opcode = P4_OPCODE(P4_EVENT_UOP_QUEUE_WRITES),
		.escr_msr = { MSR_P4_MS_ESCR0, MSR_P4_MS_ESCR1 },
		.escr_emask =
			P4_ESCR_EMASK_BIT(P4_EVENT_UOP_QUEUE_WRITES, FROM_TC_BUILD) |
			P4_ESCR_EMASK_BIT(P4_EVENT_UOP_QUEUE_WRITES, FROM_TC_DELIVER) |
			P4_ESCR_EMASK_BIT(P4_EVENT_UOP_QUEUE_WRITES, FROM_ROM),
		.cntr = { {4, 5, -1}, {6, 7, -1} },
	},
	[P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE] = {
		.opcode = P4_OPCODE(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE),
		.escr_msr = { MSR_P4_TBPU_ESCR0, MSR_P4_TBPU_ESCR0 },
		.escr_emask =
			P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, CONDITIONAL) |
			P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, CALL) |
			P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, RETURN) |
			P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, INDIRECT),
		.cntr = { {4, 5, -1}, {6, 7, -1} },
	},
	[P4_EVENT_RETIRED_BRANCH_TYPE] = {
		.opcode = P4_OPCODE(P4_EVENT_RETIRED_BRANCH_TYPE),
		.escr_msr = { MSR_P4_TBPU_ESCR0, MSR_P4_TBPU_ESCR1 },
		.escr_emask =
			P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, CONDITIONAL) |
			P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, CALL) |
			P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, RETURN) |
			P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, INDIRECT),
		.cntr = { {4, 5, -1}, {6, 7, -1} },
	},
	[P4_EVENT_RESOURCE_STALL] = {
		.opcode = P4_OPCODE(P4_EVENT_RESOURCE_STALL),
		.escr_msr = { MSR_P4_ALF_ESCR0, MSR_P4_ALF_ESCR1 },
		.escr_emask =
			P4_ESCR_EMASK_BIT(P4_EVENT_RESOURCE_STALL, SBFULL),
		.cntr = { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_WC_BUFFER] = {
		.opcode = P4_OPCODE(P4_EVENT_WC_BUFFER),
		.escr_msr = { MSR_P4_DAC_ESCR0, MSR_P4_DAC_ESCR1 },
		.escr_emask =
			P4_ESCR_EMASK_BIT(P4_EVENT_WC_BUFFER, WCB_EVICTS) |
			P4_ESCR_EMASK_BIT(P4_EVENT_WC_BUFFER, WCB_FULL_EVICTS),
		.shared = 1,
		.cntr = { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_B2B_CYCLES] = {
		.opcode = P4_OPCODE(P4_EVENT_B2B_CYCLES),
		.escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
		.escr_emask = 0,
		.cntr = { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_BNR] = {
		.opcode = P4_OPCODE(P4_EVENT_BNR),
		.escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
		.escr_emask = 0,
		.cntr = { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_SNOOP] = {
		.opcode = P4_OPCODE(P4_EVENT_SNOOP),
		.escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
		.escr_emask = 0,
		.cntr = { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_RESPONSE] = {
		.opcode = P4_OPCODE(P4_EVENT_RESPONSE),
		.escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
		.escr_emask = 0,
		.cntr = { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_FRONT_END_EVENT] = {
		.opcode = P4_OPCODE(P4_EVENT_FRONT_END_EVENT),
		.escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
		.escr_emask =
			P4_ESCR_EMASK_BIT(P4_EVENT_FRONT_END_EVENT, NBOGUS) |
			P4_ESCR_EMASK_BIT(P4_EVENT_FRONT_END_EVENT, BOGUS),
		.cntr = { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_EXECUTION_EVENT] = {
		.opcode = P4_OPCODE(P4_EVENT_EXECUTION_EVENT),
		.escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
		.escr_emask =
			P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS0) |
			P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS1) |
			P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS2) |
			P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS3) |
			P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS0) |
			P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS1) |
			P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS2) |
			P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS3),
		.cntr = { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_REPLAY_EVENT] = {
		.opcode = P4_OPCODE(P4_EVENT_REPLAY_EVENT),
		.escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
		.escr_emask =
			P4_ESCR_EMASK_BIT(P4_EVENT_REPLAY_EVENT, NBOGUS) |
			P4_ESCR_EMASK_BIT(P4_EVENT_REPLAY_EVENT, BOGUS),
		.cntr = { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_INSTR_RETIRED] = {
		.opcode = P4_OPCODE(P4_EVENT_INSTR_RETIRED),
		.escr_msr = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
		.escr_emask =
			P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, NBOGUSNTAG) |
			P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, NBOGUSTAG) |
			P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, BOGUSNTAG) |
			P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, BOGUSTAG),
		.cntr = { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_UOPS_RETIRED] = {
		.opcode = P4_OPCODE(P4_EVENT_UOPS_RETIRED),
		.escr_msr = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
		.escr_emask =
			P4_ESCR_EMASK_BIT(P4_EVENT_UOPS_RETIRED, NBOGUS) |
			P4_ESCR_EMASK_BIT(P4_EVENT_UOPS_RETIRED, BOGUS),
		.cntr = { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_UOP_TYPE] = {
		.opcode = P4_OPCODE(P4_EVENT_UOP_TYPE),
		.escr_msr = { MSR_P4_RAT_ESCR0, MSR_P4_RAT_ESCR1 },
		.escr_emask =
			P4_ESCR_EMASK_BIT(P4_EVENT_UOP_TYPE, TAGLOADS) |
			P4_ESCR_EMASK_BIT(P4_EVENT_UOP_TYPE, TAGSTORES),
		.cntr = { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_BRANCH_RETIRED] = {
		.opcode = P4_OPCODE(P4_EVENT_BRANCH_RETIRED),
		.escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
		.escr_emask =
			P4_ESCR_EMASK_BIT(P4_EVENT_BRANCH_RETIRED, MMNP) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BRANCH_RETIRED, MMNM) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BRANCH_RETIRED, MMTP) |
			P4_ESCR_EMASK_BIT(P4_EVENT_BRANCH_RETIRED, MMTM),
		.cntr = { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_MISPRED_BRANCH_RETIRED] = {
		.opcode = P4_OPCODE(P4_EVENT_MISPRED_BRANCH_RETIRED),
		.escr_msr = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
		.escr_emask =
			P4_ESCR_EMASK_BIT(P4_EVENT_MISPRED_BRANCH_RETIRED, NBOGUS),
		.cntr = { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_X87_ASSIST] = {
		.opcode = P4_OPCODE(P4_EVENT_X87_ASSIST),
		.escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
		.escr_emask =
			P4_ESCR_EMASK_BIT(P4_EVENT_X87_ASSIST, FPSU) |
			P4_ESCR_EMASK_BIT(P4_EVENT_X87_ASSIST, FPSO) |
			P4_ESCR_EMASK_BIT(P4_EVENT_X87_ASSIST, POAO) |
			P4_ESCR_EMASK_BIT(P4_EVENT_X87_ASSIST, POAU) |
			P4_ESCR_EMASK_BIT(P4_EVENT_X87_ASSIST, PREA),
		.cntr = { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_MACHINE_CLEAR] = {
		.opcode = P4_OPCODE(P4_EVENT_MACHINE_CLEAR),
		.escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
		.escr_emask =
			P4_ESCR_EMASK_BIT(P4_EVENT_MACHINE_CLEAR, CLEAR) |
			P4_ESCR_EMASK_BIT(P4_EVENT_MACHINE_CLEAR, MOCLEAR) |
			P4_ESCR_EMASK_BIT(P4_EVENT_MACHINE_CLEAR, SMCLEAR),
		.cntr = { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_INSTR_COMPLETED] = {
		.opcode = P4_OPCODE(P4_EVENT_INSTR_COMPLETED),
		.escr_msr = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
		.escr_emask =
			P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_COMPLETED, NBOGUS) |
			P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_COMPLETED, BOGUS),
		.cntr = { {12, 13, 16}, {14, 15, 17} },
	},
};

#define P4_GEN_CACHE_EVENT(event, bit, metric)				\
	p4_config_pack_escr(P4_ESCR_EVENT(event) |			\
			    P4_ESCR_EMASK_BIT(event, bit)) |		\
	p4_config_pack_cccr(metric |					\
			    P4_CCCR_ESEL(P4_OPCODE_ESEL(P4_OPCODE(event))))
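
/*
 * Worked example (a sketch; the exact bit layout is defined in
 * perf_event_p4.h): the macro packs the ESCR event/emask pair into
 * the upper half of the u64 config and the PEBS metric plus the
 * matching ESCR select into the CCCR (lower) half, so e.g.
 *
 *	P4_GEN_CACHE_EVENT(P4_EVENT_REPLAY_EVENT, NBOGUS,
 *			   P4_PEBS_METRIC__dtlb_load_miss_retired)
 *
 * yields a config that both selects the replay event and tags it
 * with the DTLB-load-miss PEBS metric.
 */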

static __initconst const u64 p4_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = P4_GEN_CACHE_EVENT(P4_EVENT_REPLAY_EVENT, NBOGUS,
						P4_PEBS_METRIC__1stl_cache_load_miss_retired),
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = P4_GEN_CACHE_EVENT(P4_EVENT_REPLAY_EVENT, NBOGUS,
						P4_PEBS_METRIC__2ndl_cache_load_miss_retired),
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = P4_GEN_CACHE_EVENT(P4_EVENT_REPLAY_EVENT, NBOGUS,
						P4_PEBS_METRIC__dtlb_load_miss_retired),
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = P4_GEN_CACHE_EVENT(P4_EVENT_REPLAY_EVENT, NBOGUS,
						P4_PEBS_METRIC__dtlb_store_miss_retired),
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = P4_GEN_CACHE_EVENT(P4_EVENT_ITLB_REFERENCE, HIT,
						P4_PEBS_METRIC__none),
		[ C(RESULT_MISS) ] = P4_GEN_CACHE_EVENT(P4_EVENT_ITLB_REFERENCE, MISS,
						P4_PEBS_METRIC__none),
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
};
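
/*
 * Note on the table values above (behaviour of the generic x86 code,
 * stated here for orientation): -1 marks an op/result combination the
 * hardware cannot count and such events are rejected outright, while
 * 0x0 simply means no event is wired up for that slot.
 */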

/*
 * Because Netburst is quite restricted in how many identical events
 * may run simultaneously, we introduce event aliases, i.e. different
 * events which have the same functionality but use non-intersecting
 * resources (ESCR/CCCR/counter registers).
 *
 * This allows us to relax the restrictions a bit and run two or more
 * identical events together.
 *
 * Never set any custom internal bits such as P4_CONFIG_HT,
 * P4_CONFIG_ALIASABLE or bits for P4_PEBS_METRIC; they are either
 * kept up to date automatically or not applicable at all.
 */
static struct p4_event_alias {
	u64 original;
	u64 alternative;
} p4_event_aliases[] = {
	{
		/*
		 * Non-halted cycles can be substituted with non-sleeping cycles (see
		 * Intel SDM Vol3b for details). We need this alias to be able
		 * to run nmi-watchdog and 'perf top' (or any other user space tool
		 * which is interested in running PERF_COUNT_HW_CPU_CYCLES)
		 * simultaneously.
		 */
		.original =
			p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_GLOBAL_POWER_EVENTS) |
				P4_ESCR_EMASK_BIT(P4_EVENT_GLOBAL_POWER_EVENTS, RUNNING)),
		.alternative =
			p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_EXECUTION_EVENT) |
				P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS0) |
				P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS1) |
				P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS2) |
				P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS3) |
				P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS0) |
				P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS1) |
				P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS2) |
				P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS3)) |
			p4_config_pack_cccr(P4_CCCR_THRESHOLD(15) | P4_CCCR_COMPLEMENT |
				P4_CCCR_COMPARE),
	},
};

static u64 p4_get_alias_event(u64 config)
{
	u64 config_match;
	int i;

	/*
	 * Only an event with the special mark is allowed: this is
	 * how we make sure it didn't come in as a malformed RAW
	 * event.
	 */
	if (!(config & P4_CONFIG_ALIASABLE))
		return 0;

	config_match = config & P4_CONFIG_EVENT_ALIAS_MASK;

	for (i = 0; i < ARRAY_SIZE(p4_event_aliases); i++) {
		if (config_match == p4_event_aliases[i].original) {
			config_match = p4_event_aliases[i].alternative;
			break;
		} else if (config_match == p4_event_aliases[i].alternative) {
			config_match = p4_event_aliases[i].original;
			break;
		}
	}

	if (i >= ARRAY_SIZE(p4_event_aliases))
		return 0;

	return config_match | (config & P4_CONFIG_EVENT_ALIAS_IMMUTABLE_BITS);
}
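
/*
 * Usage sketch (illustrative, not driver code): a caller that fails
 * to schedule an aliasable config may retry with the swapped
 * encoding, e.g.
 *
 *	u64 alt = p4_get_alias_event(hwc->config);
 *
 *	if (alt)
 *		... retry scheduling with 'alt', which borrows a
 *		disjoint set of ESCR/CCCR/counter resources ...
 *
 * This is what lets the NMI watchdog and 'perf top' count cycles
 * simultaneously.
 */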
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) static u64 p4_general_events[PERF_COUNT_HW_MAX] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) /* non-halted CPU clocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) [PERF_COUNT_HW_CPU_CYCLES] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_GLOBAL_POWER_EVENTS) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) P4_ESCR_EMASK_BIT(P4_EVENT_GLOBAL_POWER_EVENTS, RUNNING)) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) P4_CONFIG_ALIASABLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) * retired instructions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) * in a sake of simplicity we don't use the FSB tagging
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) [PERF_COUNT_HW_INSTRUCTIONS] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_INSTR_RETIRED) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, NBOGUSNTAG) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, BOGUSNTAG)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) /* cache hits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) [PERF_COUNT_HW_CACHE_REFERENCES] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_BSQ_CACHE_REFERENCE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITS) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITM) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITS) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITM)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) /* cache misses */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) [PERF_COUNT_HW_CACHE_MISSES] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_BSQ_CACHE_REFERENCE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_MISS) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_MISS) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, WR_2ndL_MISS)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) /* branch instructions retired */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_RETIRED_BRANCH_TYPE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, CONDITIONAL) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, CALL) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, RETURN) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, INDIRECT)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) /* mispredicted branches retired */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) [PERF_COUNT_HW_BRANCH_MISSES] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_MISPRED_BRANCH_RETIRED) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) P4_ESCR_EMASK_BIT(P4_EVENT_MISPRED_BRANCH_RETIRED, NBOGUS)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696)
/* bus ready clocks (cpu is driving #DRDY_DRV or #DRDY_OWN): */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) [PERF_COUNT_HW_BUS_CYCLES] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_FSB_DATA_ACTIVITY) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_DRV) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_OWN)) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) p4_config_pack_cccr(P4_CCCR_EDGE | P4_CCCR_COMPARE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) static struct p4_event_bind *p4_config_get_bind(u64 config)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) unsigned int evnt = p4_config_unpack_event(config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) struct p4_event_bind *bind = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) if (evnt < ARRAY_SIZE(p4_event_bind_map))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) bind = &p4_event_bind_map[evnt];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) return bind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) static u64 p4_pmu_event_map(int hw_event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) struct p4_event_bind *bind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) unsigned int esel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) u64 config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) config = p4_general_events[hw_event];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) bind = p4_config_get_bind(config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) esel = P4_OPCODE_ESEL(bind->opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) config |= p4_config_pack_cccr(P4_CCCR_ESEL(esel));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) return config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) }
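
/*
 * For illustration only (not used by the driver): the mapping above
 * amounts to taking the packed ESCR template from p4_general_events
 * and copying the event's ESCR select number into the CCCR half of
 * the config, roughly:
 *
 *	u64 cfg  = p4_general_events[PERF_COUNT_HW_INSTRUCTIONS];
 *	u32 esel = P4_OPCODE_ESEL(p4_config_get_bind(cfg)->opcode);
 *
 *	cfg |= p4_config_pack_cccr(P4_CCCR_ESEL(esel));
 *
 * so the resulting u64 carries both halves: CCCR bits in the low
 * word and ESCR bits in the high word.
 */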
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) /* check cpu model specifics */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) static bool p4_event_match_cpu_model(unsigned int event_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) {
/* The INSTR_COMPLETED event only exists on models 3, 4 and 6 (Prescott) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) if (event_idx == P4_EVENT_INSTR_COMPLETED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) if (boot_cpu_data.x86_model != 3 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) boot_cpu_data.x86_model != 4 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) boot_cpu_data.x86_model != 6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740)
/*
 * For reference:
 * - IQ_ESCR0 and IQ_ESCR1 exist only on models 1 and 2
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) static int p4_validate_raw_event(struct perf_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) unsigned int v, emask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752)
/* User data may contain an out-of-bounds event index */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) v = p4_config_unpack_event(event->attr.config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) if (v >= ARRAY_SIZE(p4_event_bind_map))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) /* It may be unsupported: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) if (!p4_event_match_cpu_model(v))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761)
/*
 * NOTE: P4_CCCR_THREAD_ANY does not have the same meaning as in
 * Architectural Performance Monitoring: it selects not on _which_
 * logical cpu to count but rather _when_ to count, i.e. it depends
 * on the logical cpu state -- count the event when one cpu is
 * active, when none is, when both are, or in any of these cases --
 * so we simply allow the user to pass any value desired.
 *
 * In turn we always set the Tx_OS/Tx_USR bits bound to the logical
 * cpu at hand, without propagating them to the other cpu.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) * if an event is shared across the logical threads
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) * the user needs special permissions to be able to use it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) if (p4_ht_active() && p4_event_bind_map[v].shared) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) v = perf_allow_cpu(&event->attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) if (v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) return v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) /* ESCR EventMask bits may be invalid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) emask = p4_config_unpack_escr(event->attr.config) & P4_ESCR_EVENTMASK_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) if (emask & ~p4_event_bind_map[v].escr_emask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) /*
* the config may carry some invalid PEBS bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) if (p4_config_pebs_has(event->attr.config, P4_PEBS_CONFIG_ENABLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) v = p4_config_unpack_metric(event->attr.config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) if (v >= ARRAY_SIZE(p4_pebs_bind_map))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) }
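
/*
 * A minimal user-space sketch of an event that passes the checks
 * above (the bit values are hypothetical, only the layout matters):
 * a raw P4 event hands a packed ESCR/CCCR template in via
 * attr.config, e.g.
 *
 *	struct perf_event_attr attr = {
 *		.type   = PERF_TYPE_RAW,
 *		.size   = sizeof(attr),
 *		.config = ((u64)escr_bits << 32) | cccr_bits,
 *	};
 *	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *
 * where the ESCR half may only use EventMask bits valid for the
 * event index, and P4_PEBS_CONFIG_ENABLE must not be set.
 */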
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) static int p4_hw_config(struct perf_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) int cpu = get_cpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) u32 escr, cccr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) /*
* The reason we grab the cpu this early is that if the event gets
* scheduled for the first time on this same cpu, we will not need
* to swap the thread-specific flags in the config (saving some
* cpu cycles)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) cccr = p4_default_cccr_conf(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) escr = p4_default_escr_conf(cpu, event->attr.exclude_kernel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) event->attr.exclude_user);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) event->hw.config = p4_config_pack_escr(escr) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) p4_config_pack_cccr(cccr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) if (p4_ht_active() && p4_ht_thread(cpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) event->hw.config = p4_set_ht_bit(event->hw.config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) if (event->attr.type == PERF_TYPE_RAW) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) struct p4_event_bind *bind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) unsigned int esel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) /*
* Clear the bits that are reserved to be managed by the kernel
* itself and are never allowed from user space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) event->attr.config &= P4_CONFIG_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) rc = p4_validate_raw_event(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) /*
* Note that for RAW events we allow the user to use P4_CCCR_RESERVED
* bits, since we keep additional info there (for cache events etc.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) event->hw.config |= event->attr.config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) bind = p4_config_get_bind(event->attr.config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) if (!bind) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) rc = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) esel = P4_OPCODE_ESEL(bind->opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) event->hw.config |= p4_config_pack_cccr(P4_CCCR_ESEL(esel));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) rc = x86_setup_perfctr(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) put_cpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) static inline int p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) u64 v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
/* the official way the hardware indicates an overflow */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) rdmsrl(hwc->config_base, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) if (v & P4_CCCR_OVF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) wrmsrl(hwc->config_base, v & ~P4_CCCR_OVF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) /*
* In some circumstances the overflow might issue an NMI without
* setting the P4_CCCR_OVF bit. Because a counter holds a negative
* value, we simply check whether the high bit is still set: if it
* is cleared, the counter has crossed zero and continued counting
* before the real NMI signal was received:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) rdmsrl(hwc->event_base, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) if (!(v & ARCH_P4_UNFLAGGED_BIT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) }
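
/*
 * A worked example of the fallback check above, assuming the usual
 * 40-bit Netburst counter width (ARCH_P4_UNFLAGGED_BIT is then
 * bit 39): with a sampling period of 1000 the counter starts at
 *
 *	(1ULL << 40) - 1000	(bit 39 set, i.e. "negative")
 *
 * and after 1000 increments it wraps past zero and keeps counting
 * up with bit 39 clear -- exactly the unflagged overflow condition
 * detected here.
 */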
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) static void p4_pmu_disable_pebs(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) * FIXME
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) *
* Two threads are still allowed to set up the same cache
* events, so we can't simply clear the metrics until we know
* that no one depends on us; we would need some kind of
* reference counter for "ReplayEvent" users.
*
* RAW events are even more complex: if the user (for some
* reason) passes a cache event metric with an improper event
* opcode, it is fine from the hardware point of view but
* complete nonsense as far as the meaning of such an action goes.
*
* So for the moment leave the metrics turned on forever -- it's
* ok for now but needs to be revisited!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) * (void)wrmsrl_safe(MSR_IA32_PEBS_ENABLE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) * (void)wrmsrl_safe(MSR_P4_PEBS_MATRIX_VERT, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) static inline void p4_pmu_disable_event(struct perf_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) struct hw_perf_event *hwc = &event->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) /*
* If the event gets disabled while the counter is in an
* overflowed state we need to clear P4_CCCR_OVF, otherwise
* the interrupt gets asserted again and again
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) (void)wrmsrl_safe(hwc->config_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) p4_config_unpack_cccr(hwc->config) & ~P4_CCCR_ENABLE & ~P4_CCCR_OVF & ~P4_CCCR_RESERVED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) static void p4_pmu_disable_all(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) for (idx = 0; idx < x86_pmu.num_counters; idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) struct perf_event *event = cpuc->events[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) if (!test_bit(idx, cpuc->active_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) p4_pmu_disable_event(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) p4_pmu_disable_pebs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) /* configuration must be valid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) static void p4_pmu_enable_pebs(u64 config)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) struct p4_pebs_bind *bind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) unsigned int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) BUILD_BUG_ON(P4_PEBS_METRIC__max > P4_PEBS_CONFIG_METRIC_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) idx = p4_config_unpack_metric(config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) if (idx == P4_PEBS_METRIC__none)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) bind = &p4_pebs_bind_map[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) (void)wrmsrl_safe(MSR_IA32_PEBS_ENABLE, (u64)bind->metric_pebs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) (void)wrmsrl_safe(MSR_P4_PEBS_MATRIX_VERT, (u64)bind->metric_vert);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) static void p4_pmu_enable_event(struct perf_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) struct hw_perf_event *hwc = &event->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) int thread = p4_ht_config_thread(hwc->config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) u64 escr_conf = p4_config_unpack_escr(p4_clear_ht_bit(hwc->config));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) unsigned int idx = p4_config_unpack_event(hwc->config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) struct p4_event_bind *bind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) u64 escr_addr, cccr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) bind = &p4_event_bind_map[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) escr_addr = bind->escr_msr[thread];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) /*
* - we don't support cascaded counters yet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) * - and counter 1 is broken (erratum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) WARN_ON_ONCE(p4_is_event_cascaded(hwc->config));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) WARN_ON_ONCE(hwc->idx == 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) /* we need a real Event value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) escr_conf &= ~P4_ESCR_EVENT_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) escr_conf |= P4_ESCR_EVENT(P4_OPCODE_EVNT(bind->opcode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) cccr = p4_config_unpack_cccr(hwc->config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) /*
* It could be a cache event, so we may need to write the
* metrics into the additional MSRs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) p4_pmu_enable_pebs(hwc->config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) (void)wrmsrl_safe(escr_addr, escr_conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) (void)wrmsrl_safe(hwc->config_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) (cccr & ~P4_CCCR_RESERVED) | P4_CCCR_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) static void p4_pmu_enable_all(int added)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) for (idx = 0; idx < x86_pmu.num_counters; idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) struct perf_event *event = cpuc->events[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) if (!test_bit(idx, cpuc->active_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) p4_pmu_enable_event(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) static int p4_pmu_handle_irq(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) struct perf_sample_data data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) struct cpu_hw_events *cpuc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) struct perf_event *event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) struct hw_perf_event *hwc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) int idx, handled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) cpuc = this_cpu_ptr(&cpu_hw_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) for (idx = 0; idx < x86_pmu.num_counters; idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) int overflow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) if (!test_bit(idx, cpuc->active_mask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) /* catch in-flight IRQs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) if (__test_and_clear_bit(idx, cpuc->running))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) handled++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) event = cpuc->events[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) hwc = &event->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) WARN_ON_ONCE(hwc->idx != idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
/* it might be an unflagged overflow */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) overflow = p4_pmu_clear_cccr_ovf(hwc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) val = x86_perf_event_update(event);
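/*
 * No overflow was flagged and the counter is still negative
 * (high bit set), i.e. it has not wrapped yet -- nothing to do.
 */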
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) if (!overflow && (val & (1ULL << (x86_pmu.cntval_bits - 1))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) handled += overflow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) /* event overflow for sure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) perf_sample_data_init(&data, 0, hwc->last_period);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) if (!x86_perf_event_set_period(event))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) if (perf_event_overflow(event, &data, regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) x86_pmu_stop(event, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) if (handled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) inc_irq_stat(apic_perf_irqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) * When dealing with the unmasking of the LVTPC on P4 perf hw, it has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) * been observed that the OVF bit flag has to be cleared first _before_
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) * the LVTPC can be unmasked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) *
* The reason is that the NMI line will continue to be asserted
* while the OVF bit is set. This causes a second NMI to be
* generated if the LVTPC is unmasked before the OVF bit is
* cleared, leading to unknown NMI messages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) apic_write(APIC_LVTPC, APIC_DM_NMI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) return handled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) /*
* Swap the thread-specific fields according to the thread
* we are going to run on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) static void p4_pmu_swap_config_ts(struct hw_perf_event *hwc, int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) u32 escr, cccr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) /*
* Either we are lucky and continue on the same cpu, or there is no HT support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) if (!p4_should_swap_ts(hwc->config, cpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) /*
* The event has migrated from another logical
* cpu, so we need to swap the thread-specific flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) escr = p4_config_unpack_escr(hwc->config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) cccr = p4_config_unpack_cccr(hwc->config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) if (p4_ht_thread(cpu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) cccr &= ~P4_CCCR_OVF_PMI_T0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) cccr |= P4_CCCR_OVF_PMI_T1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) if (escr & P4_ESCR_T0_OS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) escr &= ~P4_ESCR_T0_OS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) escr |= P4_ESCR_T1_OS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) if (escr & P4_ESCR_T0_USR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) escr &= ~P4_ESCR_T0_USR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) escr |= P4_ESCR_T1_USR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) hwc->config = p4_config_pack_escr(escr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) hwc->config |= p4_config_pack_cccr(cccr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) hwc->config |= P4_CONFIG_HT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) cccr &= ~P4_CCCR_OVF_PMI_T1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) cccr |= P4_CCCR_OVF_PMI_T0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) if (escr & P4_ESCR_T1_OS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) escr &= ~P4_ESCR_T1_OS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) escr |= P4_ESCR_T0_OS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) if (escr & P4_ESCR_T1_USR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) escr &= ~P4_ESCR_T1_USR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) escr |= P4_ESCR_T0_USR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) hwc->config = p4_config_pack_escr(escr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) hwc->config |= p4_config_pack_cccr(cccr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) hwc->config &= ~P4_CONFIG_HT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) }
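
/*
 * For illustration: an event that carried P4_ESCR_T0_OS,
 * P4_ESCR_T0_USR and P4_CCCR_OVF_PMI_T0 and then migrates to the
 * sibling thread leaves the function above with P4_ESCR_T1_OS,
 * P4_ESCR_T1_USR and P4_CCCR_OVF_PMI_T1 set instead, plus the
 * P4_CONFIG_HT marker -- the same event rebound to thread 1's half
 * of the shared ESCR/CCCR resources.
 */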
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)
/*
 * ESCR address hashing is tricky: the ESCRs are not sequential in
 * the MSR space, but they all start from MSR_P4_BSU_ESCR0 (0x03a0)
 * and every ESCR address lies within the range [0x03a0, 0x03e1],
 * so we end up with a hash table that is ~70% filled.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) #define P4_ESCR_MSR_BASE 0x000003a0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) #define P4_ESCR_MSR_MAX 0x000003e1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) #define P4_ESCR_MSR_TABLE_SIZE (P4_ESCR_MSR_MAX - P4_ESCR_MSR_BASE + 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) #define P4_ESCR_MSR_IDX(msr) (msr - P4_ESCR_MSR_BASE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) #define P4_ESCR_MSR_TABLE_ENTRY(msr) [P4_ESCR_MSR_IDX(msr)] = msr
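
/*
 * A quick worked example of the hashing above: the table spans
 * P4_ESCR_MSR_MAX - P4_ESCR_MSR_BASE + 1 = 0x3e1 - 0x3a0 + 1 = 66
 * slots, so for instance
 *
 *	P4_ESCR_MSR_IDX(MSR_P4_BSU_ESCR0) == 0x3a0 - 0x3a0 == 0
 *	P4_ESCR_MSR_IDX(MSR_P4_BSU_ESCR1) == 0x3a1 - 0x3a0 == 1
 *
 * With the 46 ESCRs registered below, the table is 46/66 =~ 70%
 * full; unregistered slots stay zero, which is how
 * p4_get_escr_idx() rejects addresses that are not real ESCRs.
 */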
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) static const unsigned int p4_escr_table[P4_ESCR_MSR_TABLE_SIZE] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_ALF_ESCR0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_ALF_ESCR1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_BPU_ESCR0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_BPU_ESCR1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_BSU_ESCR0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_BSU_ESCR1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR3),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR4),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR5),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_DAC_ESCR0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_DAC_ESCR1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FIRM_ESCR0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FIRM_ESCR1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FLAME_ESCR0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FLAME_ESCR1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FSB_ESCR0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FSB_ESCR1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IQ_ESCR0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IQ_ESCR1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IS_ESCR0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IS_ESCR1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_ITLB_ESCR0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_ITLB_ESCR1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IX_ESCR0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IX_ESCR1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_MOB_ESCR0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_MOB_ESCR1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_MS_ESCR0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_MS_ESCR1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_PMH_ESCR0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_PMH_ESCR1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_RAT_ESCR0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_RAT_ESCR1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_SAAT_ESCR0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_SAAT_ESCR1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_SSU_ESCR0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_SSU_ESCR1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_TBPU_ESCR0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_TBPU_ESCR1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_TC_ESCR0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_TC_ESCR1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_U2L_ESCR0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_U2L_ESCR1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) static int p4_get_escr_idx(unsigned int addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) unsigned int idx = P4_ESCR_MSR_IDX(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) if (unlikely(idx >= P4_ESCR_MSR_TABLE_SIZE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) !p4_escr_table[idx] ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) p4_escr_table[idx] != addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) WARN_ONCE(1, "P4 PMU: Wrong address passed: %x\n", addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) return idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) static int p4_next_cntr(int thread, unsigned long *used_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) struct p4_event_bind *bind)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) for (i = 0; i < P4_CNTR_LIMIT; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) j = bind->cntr[thread][i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) if (j != -1 && !test_bit(j, used_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) return j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) }
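
/*
 * For illustration (hypothetical bind contents): with
 * bind->cntr[thread] = { 0, 2, -1 } and counter 0 already set in
 * used_mask, the scan above skips 0 and returns 2; if 2 were busy
 * too it would return -1, since a -1 entry means "no further
 * candidate counter for this thread".
 */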
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) static int p4_pmu_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) unsigned long escr_mask[BITS_TO_LONGS(P4_ESCR_MSR_TABLE_SIZE)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) int cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) struct hw_perf_event *hwc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) struct p4_event_bind *bind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) unsigned int i, thread, num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) int cntr_idx, escr_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) u64 config_alias;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) int pass;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) bitmap_zero(used_mask, X86_PMC_IDX_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) bitmap_zero(escr_mask, P4_ESCR_MSR_TABLE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) for (i = 0, num = n; i < n; i++, num--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) hwc = &cpuc->event_list[i]->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) thread = p4_ht_thread(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) pass = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) /*
* It's possible to cycle endlessly between an
* original and its alternative event if both are
* already scheduled, so bail out after a couple
* of passes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) if (pass > 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) bind = p4_config_get_bind(hwc->config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) escr_idx = p4_get_escr_idx(bind->escr_msr[thread]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) if (unlikely(escr_idx == -1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) if (hwc->idx != -1 && !p4_should_swap_ts(hwc->config, cpu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) cntr_idx = hwc->idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) if (assign)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) assign[i] = hwc->idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) goto reserve;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) cntr_idx = p4_next_cntr(thread, used_mask, bind);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) if (cntr_idx == -1 || test_bit(escr_idx, escr_mask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) * Check whether an event alias is still available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) config_alias = p4_get_alias_event(hwc->config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) if (!config_alias)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) hwc->config = config_alias;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) pass++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) * Perf does test runs to see if a whole group can be assigned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) * together successfully. There can be multiple rounds of this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) * Unfortunately, p4_pmu_swap_config_ts touches the hwc->config
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) * bits, such that the next round of group assignments will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) * cause the above p4_should_swap_ts to pass instead of fail.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) * This leads to counters exclusive to thread0 being used by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) * thread1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) * Solve this with a cheap hack, reset the idx back to -1 to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) * force a new lookup (p4_next_cntr) to get the right counter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) * for the right thread.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) * This probably doesn't comply with the general spirit of how
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) * perf wants to work, but P4 is special. :-(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) if (p4_should_swap_ts(hwc->config, cpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) hwc->idx = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) p4_pmu_swap_config_ts(hwc, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) if (assign)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) assign[i] = cntr_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) reserve:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) set_bit(cntr_idx, used_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) set_bit(escr_idx, escr_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) return num ? -EINVAL : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) PMU_FORMAT_ATTR(cccr, "config:0-31" );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) PMU_FORMAT_ATTR(escr, "config:32-62");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) PMU_FORMAT_ATTR(ht, "config:63" );
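
/*
 * These attributes are exported via sysfs (typically under
 * /sys/bus/event_source/devices/cpu/format/) and document the raw
 * config layout used throughout this file:
 *
 *	bits  0-31: CCCR template
 *	bits 32-62: ESCR template
 *	bit     63: HT marker (P4_CONFIG_HT)
 *
 * which lets tools spell a raw event as e.g. cpu/escr=...,cccr=.../
 * instead of computing the packed value by hand.
 */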
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) static struct attribute *intel_p4_formats_attr[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) &format_attr_cccr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) &format_attr_escr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) &format_attr_ht.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) static __initconst const struct x86_pmu p4_pmu = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) .name = "Netburst P4/Xeon",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) .handle_irq = p4_pmu_handle_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) .disable_all = p4_pmu_disable_all,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) .enable_all = p4_pmu_enable_all,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) .enable = p4_pmu_enable_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) .disable = p4_pmu_disable_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) .eventsel = MSR_P4_BPU_CCCR0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) .perfctr = MSR_P4_BPU_PERFCTR0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) .event_map = p4_pmu_event_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) .max_events = ARRAY_SIZE(p4_general_events),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) .get_event_constraints = x86_get_event_constraints,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) /*
* If HT is disabled we may need to use all
* ARCH_P4_MAX_CCCR counters simultaneously,
* though for the moment leave it restricted,
* assuming HT is on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) .num_counters = ARCH_P4_MAX_CCCR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) .apic = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) .cntval_bits = ARCH_P4_CNTRVAL_BITS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) .cntval_mask = ARCH_P4_CNTRVAL_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) .max_period = (1ULL << (ARCH_P4_CNTRVAL_BITS - 1)) - 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) .hw_config = p4_hw_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) .schedule_events = p4_pmu_schedule_events,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) /*
* This handles erratum N15 in Intel doc 249199-029:
* the counter may not be updated correctly on a write,
* so we need a second write operation to do the trick
* (the official workaround didn't work)
*
* The idea is taken from the OProfile code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) .perfctr_second_write = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) .format_attrs = intel_p4_formats_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) };
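
/*
 * A small sanity check on the numbers above, assuming the usual
 * 40-bit Netburst counters (ARCH_P4_CNTRVAL_BITS == 40):
 *
 *	max_period = (1ULL << 39) - 1
 *
 * i.e. at most half the counter range, which keeps the high bit
 * set while a period is in flight -- the very property the
 * unflagged overflow check in p4_pmu_clear_cccr_ovf() relies on.
 */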
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) __init int p4_pmu_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) unsigned int low, high;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) int i, reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344)
/* If ARCH_P4_MAX_CCCR exceeded the generic counter arrays, indexing would fail */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) BUILD_BUG_ON(ARCH_P4_MAX_CCCR > INTEL_PMC_MAX_GENERIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) rdmsr(MSR_IA32_MISC_ENABLE, low, high);
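/*
 * Bit 7 of MSR_IA32_MISC_ENABLE is the read-only "Performance
 * Monitoring Available" flag; without it there is no PMU to drive.
 */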
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) if (!(low & (1 << 7))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) pr_cont("unsupported Netburst CPU model %d ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) boot_cpu_data.x86_model);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) memcpy(hw_cache_event_ids, p4_hw_cache_event_ids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) sizeof(hw_cache_event_ids));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) pr_cont("Netburst events, ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) x86_pmu = p4_pmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) * Even though the counters are configured to interrupt a particular
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) * logical processor when an overflow happens, testing has shown that
* on kdump kernels (which use a single cpu), thread1's counter
* continues to run and will report an NMI on thread0. Due to the
* overflow bug, this leads to a stream of unknown NMIs.
*
* Solve this by zeroing out the registers to mimic a reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) for (i = 0; i < x86_pmu.num_counters; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) reg = x86_pmu_config_addr(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) wrmsrl_safe(reg, 0ULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) }