^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Hypervisor supplied "24x7" performance counter support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Author: Cody P Schafer <cody@linux.vnet.ibm.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Copyright 2014 IBM Corporation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #define pr_fmt(fmt) "hv-24x7: " fmt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/perf_event.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/rbtree.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/vmalloc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <asm/cputhreads.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <asm/firmware.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <asm/hvcall.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <asm/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/byteorder/generic.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <asm/rtas.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include "hv-24x7.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include "hv-24x7-catalog.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include "hv-common.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27)
/* Version of the 24x7 hypervisor API that we should use in this machine. */
static int interface_version;

/* Whether we have to aggregate result data for some domains. */
static bool aggregate_result_elements;

/* CPUs servicing hv_24x7 events; exposed via the "cpumask" sysfs attribute. */
static cpumask_t hv_24x7_cpumask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35)
/*
 * domain_is_valid() - is @domain one of the known HV_PERF_DOMAIN_* values?
 *
 * The DOMAIN() X-macro list in hv-24x7-domains.h expands to one case
 * label per known domain; every label falls through to "return true".
 */
static bool domain_is_valid(unsigned domain)
{
	switch (domain) {
#define DOMAIN(n, v, x, c)		\
	case HV_PERF_DOMAIN_##n:	\
		/* fall through */
#include "hv-24x7-domains.h"
#undef DOMAIN
		return true;
	default:
		return false;
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49)
/*
 * is_physical_domain() - does @domain describe a physical resource?
 *
 * The fourth DOMAIN() X-macro parameter ('c') supplies the answer for
 * each known domain; unknown domains are reported as not physical.
 */
static bool is_physical_domain(unsigned domain)
{
	switch (domain) {
#define DOMAIN(n, v, x, c)		\
	case HV_PERF_DOMAIN_##n:	\
		return c;
#include "hv-24x7-domains.h"
#undef DOMAIN
	default:
		return false;
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) * The Processor Module Information system parameter allows transferring
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) * of certain processor module information from the platform to the OS.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) * Refer PAPR+ document to get parameter token value as '43'.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68)
/* PAPR+ token for the Processor Module Information system parameter. */
#define PROCESSOR_MODULE_INFO	43

/* Topology counts parsed by read_24x7_sys_info(); each defaults to 1. */
static u32 phys_sockets;	/* Physical sockets */
static u32 phys_chipspersocket;	/* Physical chips per socket */
static u32 phys_coresperchip;	/* Physical cores per chip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) * read_24x7_sys_info()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) * Retrieve the number of sockets and chips per socket and cores per
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) * chip details through the get-system-parameter rtas call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) */
/*
 * read_24x7_sys_info()
 * Retrieve the number of sockets and chips per socket and cores per
 * chip details through the get-system-parameter rtas call.
 *
 * On any failure the three counts keep their default of 1, so the
 * sysfs attributes always report usable values.
 *
 * Returned buffer layout (all fields 16-bit big-endian):
 *   [0] data length, [2] number of module types,
 *   [4] sockets, [6] chips per socket, [8] cores per chip
 * NOTE(review): only "len < 8" is checked before reading offset 8-9;
 * this assumes the length field does not count itself - confirm
 * against the PAPR+ Processor Module Information definition.
 */
void read_24x7_sys_info(void)
{
	int call_status, len, ntypes;

	/* rtas_data_buf is a globally shared scratch buffer - serialize. */
	spin_lock(&rtas_data_buf_lock);

	/*
	 * Making system parameter: chips and sockets and cores per chip
	 * default to 1.
	 */
	phys_sockets = 1;
	phys_chipspersocket = 1;
	phys_coresperchip = 1;

	call_status = rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1,
				NULL,
				PROCESSOR_MODULE_INFO,
				__pa(rtas_data_buf),
				RTAS_DATA_BUF_SIZE);

	if (call_status != 0) {
		pr_err("Error calling get-system-parameter %d\n",
		       call_status);
	} else {
		len = be16_to_cpup((__be16 *)&rtas_data_buf[0]);
		if (len < 8)
			goto out;

		ntypes = be16_to_cpup((__be16 *)&rtas_data_buf[2]);

		/* No module types reported - keep the defaults. */
		if (!ntypes)
			goto out;

		phys_sockets = be16_to_cpup((__be16 *)&rtas_data_buf[4]);
		phys_chipspersocket = be16_to_cpup((__be16 *)&rtas_data_buf[6]);
		phys_coresperchip = be16_to_cpup((__be16 *)&rtas_data_buf[8]);
	}

out:
	spin_unlock(&rtas_data_buf_lock);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) /* Domains for which more than one result element are returned for each event. */
/* Domains for which more than one result element are returned for each event. */
static bool domain_needs_aggregation(unsigned int domain)
{
	return aggregate_result_elements &&
		(domain == HV_PERF_DOMAIN_PHYS_CORE ||
		 (domain >= HV_PERF_DOMAIN_VCPU_HOME_CORE &&
		  domain <= HV_PERF_DOMAIN_VCPU_REMOTE_NODE));
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) static const char *domain_name(unsigned domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) if (!domain_is_valid(domain))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) switch (domain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) case HV_PERF_DOMAIN_PHYS_CHIP: return "Physical Chip";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) case HV_PERF_DOMAIN_PHYS_CORE: return "Physical Core";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) case HV_PERF_DOMAIN_VCPU_HOME_CORE: return "VCPU Home Core";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) case HV_PERF_DOMAIN_VCPU_HOME_CHIP: return "VCPU Home Chip";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) case HV_PERF_DOMAIN_VCPU_HOME_NODE: return "VCPU Home Node";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) case HV_PERF_DOMAIN_VCPU_REMOTE_NODE: return "VCPU Remote Node";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) WARN_ON_ONCE(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) static bool catalog_entry_domain_is_valid(unsigned domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) /* POWER8 doesn't support virtual domains. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) if (interface_version == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) return is_physical_domain(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) return domain_is_valid(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) * TODO: Merging events:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) * - Think of the hcall as an interface to a 4d array of counters:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) * - x = domains
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) * - y = indexes in the domain (core, chip, vcpu, node, etc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) * - z = offset into the counter space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) * - w = lpars (guest vms, "logical partitions")
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) * - A single request is: x,y,y_last,z,z_last,w,w_last
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) * - this means we can retrieve a rectangle of counters in y,z for a single x.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) * - Things to consider (ignoring w):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) * - input cost_per_request = 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) * - output cost_per_result(ys,zs) = 8 + 8 * ys + ys * zs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) * - limited number of requests per hcall (must fit into 4K bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) * - 4k = 16 [buffer header] - 16 [request size] * request_count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) * - 255 requests per hcall
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) * - sometimes it will be more efficient to read extra data and discard
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) * Example usage:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) * perf stat -e 'hv_24x7/domain=2,offset=8,vcpu=0,lpar=0xffffffff/'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181)
/*
 * Bit-field layout of perf_event_attr::config/config1/config2.
 * EVENT_DEFINE_RANGE_FORMAT() also creates a sysfs "format" attribute;
 * EVENT_DEFINE_RANGE() creates only the extractor (helpers declared in
 * hv-common.h).
 */
/* u3 0-6, one of HV_24X7_PERF_DOMAIN */
EVENT_DEFINE_RANGE_FORMAT(domain, config, 0, 3);
/* u16 */
EVENT_DEFINE_RANGE_FORMAT(core, config, 16, 31);
EVENT_DEFINE_RANGE_FORMAT(chip, config, 16, 31);
EVENT_DEFINE_RANGE_FORMAT(vcpu, config, 16, 31);
/* u32, see "data_offset" */
EVENT_DEFINE_RANGE_FORMAT(offset, config, 32, 63);
/* u16 */
EVENT_DEFINE_RANGE_FORMAT(lpar, config1, 0, 15);

/* Reserved config bits - presumably validated at event init; confirm. */
EVENT_DEFINE_RANGE(reserved1, config, 4, 15);
EVENT_DEFINE_RANGE(reserved2, config1, 16, 63);
EVENT_DEFINE_RANGE(reserved3, config2, 0, 63);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196)
/* Attributes backing the sysfs "format" directory. */
static struct attribute *format_attrs[] = {
	&format_attr_domain.attr,
	&format_attr_offset.attr,
	&format_attr_core.attr,
	&format_attr_chip.attr,
	&format_attr_vcpu.attr,
	&format_attr_lpar.attr,
	NULL,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206)
static struct attribute_group format_group = {
	.name = "format",
	.attrs = format_attrs,
};

/* The three event groups below get their .attrs populated at init time. */
static struct attribute_group event_group = {
	.name = "events",
	/* .attrs is set in init */
};

static struct attribute_group event_desc_group = {
	.name = "event_descs",
	/* .attrs is set in init */
};

static struct attribute_group event_long_desc_group = {
	.name = "event_long_descs",
	/* .attrs is set in init */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226)
/* Slab cache for data pages; created/used later in the file (not shown). */
static struct kmem_cache *hv_page_cache;

/*
 * Per-cpu transaction state for batched event reads.
 * NOTE(review): flag/error semantics are defined by the txn handlers
 * elsewhere in this file - not visible in this chunk.
 */
DEFINE_PER_CPU(int, hv_24x7_txn_flags);
DEFINE_PER_CPU(int, hv_24x7_txn_err);

struct hv_24x7_hw {
	/* One slot per request; 255 matches the per-hcall request limit. */
	struct perf_event *events[255];
};

DEFINE_PER_CPU(struct hv_24x7_hw, hv_24x7_hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) * request_buffer and result_buffer are not required to be 4k aligned,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) * but are not allowed to cross any 4k boundary. Aligning them to 4k is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) * the simplest way to ensure that.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) */
#define H24x7_DATA_BUFFER_SIZE	4096
/* Per-cpu request/result buffers handed to the 24x7 hcalls. */
DEFINE_PER_CPU(char, hv_24x7_reqb[H24x7_DATA_BUFFER_SIZE]) __aligned(4096);
DEFINE_PER_CPU(char, hv_24x7_resb[H24x7_DATA_BUFFER_SIZE]) __aligned(4096);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) static unsigned int max_num_requests(int interface_version)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) return (H24x7_DATA_BUFFER_SIZE - sizeof(struct hv_24x7_request_buffer))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) / H24x7_REQUEST_SIZE(interface_version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) static char *event_name(struct hv_24x7_event_data *ev, int *len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) *len = be16_to_cpu(ev->event_name_len) - 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) return (char *)ev->remainder;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) static char *event_desc(struct hv_24x7_event_data *ev, int *len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) unsigned nl = be16_to_cpu(ev->event_name_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) __be16 *desc_len = (__be16 *)(ev->remainder + nl - 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) *len = be16_to_cpu(*desc_len) - 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) return (char *)ev->remainder + nl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) static char *event_long_desc(struct hv_24x7_event_data *ev, int *len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) unsigned nl = be16_to_cpu(ev->event_name_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) __be16 *desc_len_ = (__be16 *)(ev->remainder + nl - 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) unsigned desc_len = be16_to_cpu(*desc_len_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) __be16 *long_desc_len = (__be16 *)(ev->remainder + nl + desc_len - 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) *len = be16_to_cpu(*long_desc_len) - 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) return (char *)ev->remainder + nl + desc_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) static bool event_fixed_portion_is_within(struct hv_24x7_event_data *ev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) void *end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) void *start = ev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) return (start + offsetof(struct hv_24x7_event_data, remainder)) < end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) * Things we don't check:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) * - padding for desc, name, and long/detailed desc is required to be '\0'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) * bytes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) * Return NULL if we pass end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) * Otherwise return the address of the byte just following the event.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) */
static void *event_end(struct hv_24x7_event_data *ev, void *end)
{
	void *start = ev;
	__be16 *dl_, *ldl_;
	unsigned dl, ldl;
	unsigned nl = be16_to_cpu(ev->event_name_len);

	/*
	 * Each of the three strings (name, desc, long desc) carries the
	 * 16-bit big-endian length of the *next* string in its last two
	 * bytes - hence the "- 2" offsets and the "< 2" minimum checks.
	 */
	if (nl < 2) {
		pr_debug("%s: name length too short: %d", __func__, nl);
		return NULL;
	}

	if (start + nl > end) {
		pr_debug("%s: start=%p + nl=%u > end=%p",
			 __func__, start, nl, end);
		return NULL;
	}

	/* Description length lives in the last 2 bytes of the name region. */
	dl_ = (__be16 *)(ev->remainder + nl - 2);
	if (!IS_ALIGNED((uintptr_t)dl_, 2))
		pr_warn("desc len not aligned %p", dl_);
	dl = be16_to_cpu(*dl_);
	if (dl < 2) {
		pr_debug("%s: desc len too short: %d", __func__, dl);
		return NULL;
	}

	if (start + nl + dl > end) {
		pr_debug("%s: (start=%p + nl=%u + dl=%u)=%p > end=%p",
			 __func__, start, nl, dl, start + nl + dl, end);
		return NULL;
	}

	/* Long-description length follows the same scheme. */
	ldl_ = (__be16 *)(ev->remainder + nl + dl - 2);
	if (!IS_ALIGNED((uintptr_t)ldl_, 2))
		pr_warn("long desc len not aligned %p", ldl_);
	ldl = be16_to_cpu(*ldl_);
	if (ldl < 2) {
		pr_debug("%s: long desc len too short (ldl=%u)",
			 __func__, ldl);
		return NULL;
	}

	if (start + nl + dl + ldl > end) {
		pr_debug("%s: start=%p + nl=%u + dl=%u + ldl=%u > end=%p",
			 __func__, start, nl, dl, ldl, end);
		return NULL;
	}

	/* Address of the byte just past this event entry. */
	return start + nl + dl + ldl;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346)
/*
 * Raw H_GET_24X7_CATALOG_PAGE hcall: ask the hypervisor to copy page
 * @index of catalog @version into the buffer at physical address
 * @phys_4096, which must be 4k aligned.  Returns the hcall status.
 */
static long h_get_24x7_catalog_page_(unsigned long phys_4096,
				     unsigned long version, unsigned long index)
{
	pr_devel("h_get_24x7_catalog_page(0x%lx, %lu, %lu)",
		 phys_4096, version, index);

	WARN_ON(!IS_ALIGNED(phys_4096, 4096));

	return plpar_hcall_norets(H_GET_24X7_CATALOG_PAGE,
				  phys_4096, version, index);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) static long h_get_24x7_catalog_page(char page[], u64 version, u32 index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) return h_get_24x7_catalog_page_(virt_to_phys(page),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) version, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) * Each event we find in the catalog, will have a sysfs entry. Format the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) * data for this sysfs entry based on the event's domain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) * Events belonging to the Chip domain can only be monitored in that domain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) * i.e the domain for these events is a fixed/knwon value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) * Events belonging to the Core domain can be monitored either in the physical
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) * core or in one of the virtual CPU domains. So the domain value for these
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) * events must be specified by the user (i.e is a required parameter). Format
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) * the Core events with 'domain=?' so the perf-tool can error check required
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) * parameters.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) * NOTE: For the Core domain events, rather than making domain a required
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) * parameter we could default it to PHYS_CORE and allowe users to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) * override the domain to one of the VCPU domains.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) * However, this can make the interface a little inconsistent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) * If we set domain=2 (PHYS_CHIP) and allow user to override this field
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) * the user may be tempted to also modify the "offset=x" field in which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) * can lead to confusing usage. Consider the HPM_PCYC (offset=0x18) and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) * HPM_INST (offset=0x20) events. With:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) * perf stat -e hv_24x7/HPM_PCYC,offset=0x20/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) * we end up monitoring HPM_INST, while the command line has HPM_PCYC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) * By not assigning a default value to the domain for the Core events,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) * we can have simple guidelines:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) * - Specifying values for parameters with "=?" is required.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) * - Specifying (i.e overriding) values for other parameters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) * is undefined.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) static char *event_fmt(struct hv_24x7_event_data *event, unsigned domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) const char *sindex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) const char *lpar;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) const char *domain_str;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) char buf[8];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) switch (domain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) case HV_PERF_DOMAIN_PHYS_CHIP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) snprintf(buf, sizeof(buf), "%d", domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) domain_str = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) lpar = "0x0";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) sindex = "chip";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) case HV_PERF_DOMAIN_PHYS_CORE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) domain_str = "?";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) lpar = "0x0";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) sindex = "core";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) domain_str = "?";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) lpar = "?";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) sindex = "vcpu";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) return kasprintf(GFP_KERNEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) "domain=%s,offset=0x%x,%s=?,lpar=%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) domain_str,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) be16_to_cpu(event->event_counter_offs) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) be16_to_cpu(event->event_group_record_offs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) sindex,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) lpar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) /* Avoid trusting fw to NUL terminate strings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) static char *memdup_to_str(char *maybe_str, int max_len, gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) return kasprintf(gfp, "%.*s", max_len, maybe_str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) static ssize_t device_show_string(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) struct dev_ext_attribute *d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) d = container_of(attr, struct dev_ext_attribute, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) return sprintf(buf, "%s\n", (char *)d->var);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) static ssize_t cpumask_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) return cpumap_print_to_pagebuf(true, buf, &hv_24x7_cpumask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) static ssize_t sockets_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) return sprintf(buf, "%d\n", phys_sockets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) static ssize_t chipspersocket_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) return sprintf(buf, "%d\n", phys_chipspersocket);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) static ssize_t coresperchip_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) return sprintf(buf, "%d\n", phys_coresperchip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) static struct attribute *device_str_attr_create_(char *name, char *str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) struct dev_ext_attribute *attr = kzalloc(sizeof(*attr), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) if (!attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) sysfs_attr_init(&attr->attr.attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) attr->var = str;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) attr->attr.attr.name = name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) attr->attr.attr.mode = 0444;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) attr->attr.show = device_show_string;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) return &attr->attr.attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) * Allocate and initialize strings representing event attributes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) * NOTE: The strings allocated here are never destroyed and continue to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) * exist till shutdown. This is to allow us to create as many events
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) * from the catalog as possible, even if we encounter errors with some.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) * In case of changes to error paths in future, these may need to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) * freed by the caller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) static struct attribute *device_str_attr_create(char *name, int name_max,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) int name_nonce,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) char *str, size_t str_max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) char *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) char *s = memdup_to_str(str, str_max, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) struct attribute *a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) if (!s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) if (!name_nonce)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) n = kasprintf(GFP_KERNEL, "%.*s", name_max, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) n = kasprintf(GFP_KERNEL, "%.*s__%d", name_max, name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) name_nonce);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) if (!n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) goto out_s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) a = device_str_attr_create_(n, s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) if (!a)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) goto out_n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) return a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) out_n:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) kfree(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) out_s:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) kfree(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) static struct attribute *event_to_attr(unsigned ix,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) struct hv_24x7_event_data *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) unsigned domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) int nonce)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) int event_name_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) char *ev_name, *a_ev_name, *val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) struct attribute *attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) if (!domain_is_valid(domain)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) pr_warn("catalog event %u has invalid domain %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) ix, domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) val = event_fmt(event, domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) if (!val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) ev_name = event_name(event, &event_name_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) if (!nonce)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) a_ev_name = kasprintf(GFP_KERNEL, "%.*s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) (int)event_name_len, ev_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) a_ev_name = kasprintf(GFP_KERNEL, "%.*s__%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) (int)event_name_len, ev_name, nonce);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) if (!a_ev_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) goto out_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) attr = device_str_attr_create_(a_ev_name, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) if (!attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) goto out_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) return attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) out_name:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) kfree(a_ev_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) out_val:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) kfree(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) static struct attribute *event_to_desc_attr(struct hv_24x7_event_data *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) int nonce)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) int nl, dl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) char *name = event_name(event, &nl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) char *desc = event_desc(event, &dl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) /* If there isn't a description, don't create the sysfs file */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) if (!dl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) return device_str_attr_create(name, nl, nonce, desc, dl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) static struct attribute *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) event_to_long_desc_attr(struct hv_24x7_event_data *event, int nonce)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) int nl, dl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) char *name = event_name(event, &nl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) char *desc = event_long_desc(event, &dl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) /* If there isn't a description, don't create the sysfs file */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) if (!dl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) return device_str_attr_create(name, nl, nonce, desc, dl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) static int event_data_to_attrs(unsigned ix, struct attribute **attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) struct hv_24x7_event_data *event, int nonce)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) *attrs = event_to_attr(ix, event, event->domain, nonce);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) if (!*attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) /* */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) struct event_uniq {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) struct rb_node node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) const char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) int nl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) unsigned ct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) unsigned domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) static int memord(const void *d1, size_t s1, const void *d2, size_t s2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) if (s1 < s2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) if (s1 > s2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) return memcmp(d1, d2, s1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) static int ev_uniq_ord(const void *v1, size_t s1, unsigned d1, const void *v2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) size_t s2, unsigned d2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) int r = memord(v1, s1, v2, s2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) if (d1 > d2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) if (d2 > d1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) static int event_uniq_add(struct rb_root *root, const char *name, int nl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) unsigned domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) struct rb_node **new = &(root->rb_node), *parent = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) struct event_uniq *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) /* Figure out where to put new node */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) while (*new) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) struct event_uniq *it;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) int result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) it = rb_entry(*new, struct event_uniq, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) result = ev_uniq_ord(name, nl, domain, it->name, it->nl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) it->domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) parent = *new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) if (result < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) new = &((*new)->rb_left);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) else if (result > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) new = &((*new)->rb_right);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) it->ct++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) pr_info("found a duplicate event %.*s, ct=%u\n", nl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) name, it->ct);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) return it->ct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) data = kmalloc(sizeof(*data), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) if (!data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) *data = (struct event_uniq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) .name = name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) .nl = nl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) .ct = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) .domain = domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) /* Add new node and rebalance tree. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) rb_link_node(&data->node, parent, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) rb_insert_color(&data->node, root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) /* data->ct */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) static void event_uniq_destroy(struct rb_root *root)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) * the strings we point to are in the giant block of memory filled by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) * the catalog, and are freed separately.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) struct event_uniq *pos, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) rbtree_postorder_for_each_entry_safe(pos, n, root, node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) kfree(pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) * ensure the event structure's sizes are self consistent and don't cause us to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) * read outside of the event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) * On success, return the event length in bytes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) * Otherwise, return -1 (and print as appropriate).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) static ssize_t catalog_event_len_validate(struct hv_24x7_event_data *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) size_t event_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) size_t event_data_bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) size_t event_entry_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) size_t offset, void *end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) ssize_t ev_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) void *ev_end, *calc_ev_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) if (offset >= event_data_bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) if (event_idx >= event_entry_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) pr_devel("catalog event data has %zu bytes of padding after last event\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) event_data_bytes - offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) if (!event_fixed_portion_is_within(event, end)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) pr_warn("event %zu fixed portion is not within range\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) event_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) ev_len = be16_to_cpu(event->length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) if (ev_len % 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) pr_info("event %zu has length %zu not divisible by 16: event=%pK\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) event_idx, ev_len, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) ev_end = (__u8 *)event + ev_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) if (ev_end > end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) pr_warn("event %zu has .length=%zu, ends after buffer end: ev_end=%pK > end=%pK, offset=%zu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) event_idx, ev_len, ev_end, end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) calc_ev_end = event_end(event, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) if (!calc_ev_end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) pr_warn("event %zu has a calculated length which exceeds buffer length %zu: event=%pK end=%pK, offset=%zu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) event_idx, event_data_bytes, event, end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) if (calc_ev_end > ev_end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) pr_warn("event %zu exceeds it's own length: event=%pK, end=%pK, offset=%zu, calc_ev_end=%pK\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) event_idx, event, ev_end, offset, calc_ev_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) return ev_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) #define MAX_4K (SIZE_MAX / 4096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) static int create_events_from_catalog(struct attribute ***events_,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) struct attribute ***event_descs_,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) struct attribute ***event_long_descs_)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) long hret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) size_t catalog_len, catalog_page_len, event_entry_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) event_data_len, event_data_offs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) event_data_bytes, junk_events, event_idx, event_attr_ct, i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) attr_max, event_idx_last, desc_ct, long_desc_ct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) ssize_t ct, ev_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) uint64_t catalog_version_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) struct attribute **events, **event_descs, **event_long_descs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) struct hv_24x7_catalog_page_0 *page_0 =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) kmem_cache_alloc(hv_page_cache, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) void *page = page_0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) void *event_data, *end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) struct hv_24x7_event_data *event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) struct rb_root ev_uniq = RB_ROOT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) if (!page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) goto e_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) hret = h_get_24x7_catalog_page(page, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) if (hret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) goto e_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) catalog_version_num = be64_to_cpu(page_0->version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) catalog_page_len = be32_to_cpu(page_0->length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) if (MAX_4K < catalog_page_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) pr_err("invalid page count: %zu\n", catalog_page_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) goto e_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) catalog_len = catalog_page_len * 4096;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) event_entry_count = be16_to_cpu(page_0->event_entry_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) event_data_offs = be16_to_cpu(page_0->event_data_offs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) event_data_len = be16_to_cpu(page_0->event_data_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) pr_devel("cv %llu cl %zu eec %zu edo %zu edl %zu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) catalog_version_num, catalog_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) event_entry_count, event_data_offs, event_data_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) if ((MAX_4K < event_data_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) || (MAX_4K < event_data_offs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) || (MAX_4K - event_data_offs < event_data_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) pr_err("invalid event data offs %zu and/or len %zu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) event_data_offs, event_data_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) goto e_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) if ((event_data_offs + event_data_len) > catalog_page_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) pr_err("event data %zu-%zu does not fit inside catalog 0-%zu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) event_data_offs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) event_data_offs + event_data_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) catalog_page_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) goto e_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) if (SIZE_MAX - 1 < event_entry_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) pr_err("event_entry_count %zu is invalid\n", event_entry_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) goto e_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) event_data_bytes = event_data_len * 4096;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) * event data can span several pages, events can cross between these
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) * pages. Use vmalloc to make this easier.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) event_data = vmalloc(event_data_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) if (!event_data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) pr_err("could not allocate event data\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) goto e_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) end = event_data + event_data_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) * using vmalloc_to_phys() like this only works if PAGE_SIZE is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) * divisible by 4096
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) BUILD_BUG_ON(PAGE_SIZE % 4096);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) for (i = 0; i < event_data_len; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) hret = h_get_24x7_catalog_page_(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) vmalloc_to_phys(event_data + i * 4096),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) catalog_version_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) i + event_data_offs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) if (hret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) pr_err("Failed to get event data in page %zu: rc=%ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) i + event_data_offs, hret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) goto e_event_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) * scan the catalog to determine the number of attributes we need, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) * verify it at the same time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) for (junk_events = 0, event = event_data, event_idx = 0, attr_max = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) event_idx++, event = (void *)event + ev_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) size_t offset = (void *)event - (void *)event_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) int nl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) ev_len = catalog_event_len_validate(event, event_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) event_data_bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) event_entry_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) offset, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) if (ev_len < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) name = event_name(event, &nl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) if (event->event_group_record_len == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) pr_devel("invalid event %zu (%.*s): group_record_len == 0, skipping\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) event_idx, nl, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) junk_events++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) if (!catalog_entry_domain_is_valid(event->domain)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) pr_info("event %zu (%.*s) has invalid domain %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) event_idx, nl, name, event->domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) junk_events++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) attr_max++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) event_idx_last = event_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) if (event_idx_last != event_entry_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) pr_warn("event buffer ended before listed # of events were parsed (got %zu, wanted %zu, junk %zu)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) event_idx_last, event_entry_count, junk_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) events = kmalloc_array(attr_max + 1, sizeof(*events), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) if (!events) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) goto e_event_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) event_descs = kmalloc_array(event_idx + 1, sizeof(*event_descs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) if (!event_descs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) goto e_event_attrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) event_long_descs = kmalloc_array(event_idx + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) sizeof(*event_long_descs), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) if (!event_long_descs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) goto e_event_descs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) /* Iterate over the catalog filling in the attribute vector */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) for (junk_events = 0, event_attr_ct = 0, desc_ct = 0, long_desc_ct = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) event = event_data, event_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) event_idx < event_idx_last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) event_idx++, ev_len = be16_to_cpu(event->length),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) event = (void *)event + ev_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) int nl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) int nonce;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) * these are the only "bad" events that are intermixed and that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) * we can ignore without issue. make sure to skip them here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) if (event->event_group_record_len == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) if (!catalog_entry_domain_is_valid(event->domain))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) name = event_name(event, &nl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) nonce = event_uniq_add(&ev_uniq, name, nl, event->domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) ct = event_data_to_attrs(event_idx, events + event_attr_ct,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) event, nonce);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) if (ct < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) pr_warn("event %zu (%.*s) creation failure, skipping\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) event_idx, nl, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) junk_events++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) event_attr_ct++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) event_descs[desc_ct] = event_to_desc_attr(event, nonce);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) if (event_descs[desc_ct])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) desc_ct++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) event_long_descs[long_desc_ct] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) event_to_long_desc_attr(event, nonce);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) if (event_long_descs[long_desc_ct])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) long_desc_ct++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) pr_info("read %zu catalog entries, created %zu event attrs (%zu failures), %zu descs\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) event_idx, event_attr_ct, junk_events, desc_ct);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) events[event_attr_ct] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) event_descs[desc_ct] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) event_long_descs[long_desc_ct] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) event_uniq_destroy(&ev_uniq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) vfree(event_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) kmem_cache_free(hv_page_cache, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) *events_ = events;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) *event_descs_ = event_descs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) *event_long_descs_ = event_long_descs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) e_event_descs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) kfree(event_descs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) e_event_attrs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) kfree(events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) e_event_data:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) vfree(event_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) e_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) kmem_cache_free(hv_page_cache, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) e_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) *events_ = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) *event_descs_ = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) *event_long_descs_ = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) static ssize_t catalog_read(struct file *filp, struct kobject *kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) struct bin_attribute *bin_attr, char *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) loff_t offset, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) long hret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) ssize_t ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) size_t catalog_len = 0, catalog_page_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) loff_t page_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) loff_t offset_in_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) size_t copy_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) uint64_t catalog_version_num = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) void *page = kmem_cache_alloc(hv_page_cache, GFP_USER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) struct hv_24x7_catalog_page_0 *page_0 = page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) hret = h_get_24x7_catalog_page(page, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) if (hret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) goto e_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) catalog_version_num = be64_to_cpu(page_0->version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) catalog_page_len = be32_to_cpu(page_0->length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) catalog_len = catalog_page_len * 4096;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) page_offset = offset / 4096;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) offset_in_page = offset % 4096;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) if (page_offset >= catalog_page_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) goto e_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) if (page_offset != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) hret = h_get_24x7_catalog_page(page, catalog_version_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) page_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) if (hret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) goto e_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) copy_len = 4096 - offset_in_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) if (copy_len > count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) copy_len = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) memcpy(buf, page+offset_in_page, copy_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) ret = copy_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) e_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) if (hret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) pr_err("h_get_24x7_catalog_page(ver=%lld, page=%lld) failed:"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) " rc=%ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) catalog_version_num, page_offset, hret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) kmem_cache_free(hv_page_cache, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) pr_devel("catalog_read: offset=%lld(%lld) count=%zu "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) "catalog_len=%zu(%zu) => %zd\n", offset, page_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) count, catalog_len, catalog_page_len, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) static ssize_t domains_show(struct device *dev, struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) char *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) int d, n, count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) const char *str;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) for (d = 0; d < HV_PERF_DOMAIN_MAX; d++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) str = domain_name(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) if (!str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) n = sprintf(page, "%d: %s\n", d, str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) if (n < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) count += n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) page += n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) #define PAGE_0_ATTR(_name, _fmt, _expr) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) static ssize_t _name##_show(struct device *dev, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) struct device_attribute *dev_attr, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) char *buf) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) long hret; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) ssize_t ret = 0; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) void *page = kmem_cache_alloc(hv_page_cache, GFP_USER); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) struct hv_24x7_catalog_page_0 *page_0 = page; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) if (!page) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) return -ENOMEM; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) hret = h_get_24x7_catalog_page(page, 0, 0); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) if (hret) { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) ret = -EIO; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) goto e_free; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) } \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) ret = sprintf(buf, _fmt, _expr); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) e_free: \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) kmem_cache_free(hv_page_cache, page); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) return ret; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) } \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) static DEVICE_ATTR_RO(_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) PAGE_0_ATTR(catalog_version, "%lld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) (unsigned long long)be64_to_cpu(page_0->version));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) PAGE_0_ATTR(catalog_len, "%lld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) (unsigned long long)be32_to_cpu(page_0->length) * 4096);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) static BIN_ATTR_RO(catalog, 0/* real length varies */);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) static DEVICE_ATTR_RO(domains);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) static DEVICE_ATTR_RO(sockets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) static DEVICE_ATTR_RO(chipspersocket);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) static DEVICE_ATTR_RO(coresperchip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) static DEVICE_ATTR_RO(cpumask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) static struct bin_attribute *if_bin_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) &bin_attr_catalog,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) static struct attribute *cpumask_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) &dev_attr_cpumask.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) static struct attribute_group cpumask_attr_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) .attrs = cpumask_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) static struct attribute *if_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) &dev_attr_catalog_len.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) &dev_attr_catalog_version.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) &dev_attr_domains.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) &dev_attr_sockets.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) &dev_attr_chipspersocket.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) &dev_attr_coresperchip.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) static struct attribute_group if_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) .name = "interface",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) .bin_attrs = if_bin_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) .attrs = if_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) static const struct attribute_group *attr_groups[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) &format_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) &event_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) &event_desc_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) &event_long_desc_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) &if_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) &cpumask_attr_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) * Start the process for a new H_GET_24x7_DATA hcall.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) static void init_24x7_request(struct hv_24x7_request_buffer *request_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) struct hv_24x7_data_result_buffer *result_buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) memset(request_buffer, 0, H24x7_DATA_BUFFER_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) memset(result_buffer, 0, H24x7_DATA_BUFFER_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) request_buffer->interface_version = interface_version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) /* memset above set request_buffer->num_requests to 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) * Commit (i.e perform) the H_GET_24x7_DATA hcall using the data collected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) * by 'init_24x7_request()' and 'add_event_to_24x7_request()'.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) static int make_24x7_request(struct hv_24x7_request_buffer *request_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) struct hv_24x7_data_result_buffer *result_buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) long ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) * NOTE: Due to variable number of array elements in request and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) * result buffer(s), sizeof() is not reliable. Use the actual
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) * allocated buffer size, H24x7_DATA_BUFFER_SIZE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) ret = plpar_hcall_norets(H_GET_24X7_DATA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) virt_to_phys(request_buffer), H24x7_DATA_BUFFER_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) virt_to_phys(result_buffer), H24x7_DATA_BUFFER_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) struct hv_24x7_request *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) req = request_buffer->requests;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) pr_notice_ratelimited("hcall failed: [%d %#x %#x %d] => ret 0x%lx (%ld) detail=0x%x failing ix=%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) req->performance_domain, req->data_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) req->starting_ix, req->starting_lpar_ix,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) ret, ret, result_buffer->detailed_rc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) result_buffer->failing_request_ix);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) * Add the given @event to the next slot in the 24x7 request_buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) * Note that H_GET_24X7_DATA hcall allows reading several counters'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) * values in a single HCALL. We expect the caller to add events to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) * request buffer one by one, make the HCALL and process the results.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) static int add_event_to_24x7_request(struct perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) struct hv_24x7_request_buffer *request_buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) u16 idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) size_t req_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) struct hv_24x7_request *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) if (request_buffer->num_requests >=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) max_num_requests(request_buffer->interface_version)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) pr_devel("Too many requests for 24x7 HCALL %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) request_buffer->num_requests);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) switch (event_get_domain(event)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) case HV_PERF_DOMAIN_PHYS_CHIP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) idx = event_get_chip(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) case HV_PERF_DOMAIN_PHYS_CORE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) idx = event_get_core(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) idx = event_get_vcpu(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) req_size = H24x7_REQUEST_SIZE(request_buffer->interface_version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) i = request_buffer->num_requests++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) req = (void *) request_buffer->requests + i * req_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) req->performance_domain = event_get_domain(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) req->data_size = cpu_to_be16(8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) req->data_offset = cpu_to_be32(event_get_offset(event));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) req->starting_lpar_ix = cpu_to_be16(event_get_lpar(event));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) req->max_num_lpars = cpu_to_be16(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) req->starting_ix = cpu_to_be16(idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) req->max_ix = cpu_to_be16(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) if (request_buffer->interface_version > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) if (domain_needs_aggregation(req->performance_domain))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) req->max_num_thread_groups = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) else if (req->performance_domain != HV_PERF_DOMAIN_PHYS_CHIP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) req->starting_thread_group_ix = idx % 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) req->max_num_thread_groups = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) * get_count_from_result - get event count from all result elements in result
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) * If the event corresponding to this result needs aggregation of the result
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) * element values, then this function does that.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) * @event: Event associated with @res.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) * @resb: Result buffer containing @res.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) * @res: Result to work on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) * @countp: Output variable containing the event count.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) * @next: Optional output variable pointing to the next result in @resb.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) static int get_count_from_result(struct perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) struct hv_24x7_data_result_buffer *resb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) struct hv_24x7_result *res, u64 *countp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) struct hv_24x7_result **next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) u16 num_elements = be16_to_cpu(res->num_elements_returned);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) u16 data_size = be16_to_cpu(res->result_element_data_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) unsigned int data_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) void *element_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) u64 count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) * We can bail out early if the result is empty.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) if (!num_elements) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) pr_debug("Result of request %hhu is empty, nothing to do\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) res->result_ix);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) if (next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) *next = (struct hv_24x7_result *) res->elements;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) return -ENODATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) * Since we always specify 1 as the maximum for the smallest resource
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) * we're requesting, there should to be only one element per result.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) * Except when an event needs aggregation, in which case there are more.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) if (num_elements != 1 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) !domain_needs_aggregation(event_get_domain(event))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) pr_err("Error: result of request %hhu has %hu elements\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) res->result_ix, num_elements);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) if (data_size != sizeof(u64)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) pr_debug("Error: result of request %hhu has data of %hu bytes\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) res->result_ix, data_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) return -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) if (resb->interface_version == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) data_offset = offsetof(struct hv_24x7_result_element_v1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) element_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) data_offset = offsetof(struct hv_24x7_result_element_v2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) element_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) /* Go through the result elements in the result. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) for (i = count = 0, element_data = res->elements + data_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) i < num_elements;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) i++, element_data += data_size + data_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) count += be64_to_cpu(*((u64 *) element_data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) *countp = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) /* The next result is after the last result element. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) if (next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) *next = element_data - data_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) static int single_24x7_request(struct perf_event *event, u64 *count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) struct hv_24x7_request_buffer *request_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) struct hv_24x7_data_result_buffer *result_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) BUILD_BUG_ON(sizeof(*request_buffer) > 4096);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) BUILD_BUG_ON(sizeof(*result_buffer) > 4096);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) request_buffer = (void *)get_cpu_var(hv_24x7_reqb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) result_buffer = (void *)get_cpu_var(hv_24x7_resb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) init_24x7_request(request_buffer, result_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) ret = add_event_to_24x7_request(event, request_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) ret = make_24x7_request(request_buffer, result_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) /* process result from hcall */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) ret = get_count_from_result(event, result_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) result_buffer->results, count, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) put_cpu_var(hv_24x7_reqb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) put_cpu_var(hv_24x7_resb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) static int h_24x7_event_init(struct perf_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) struct hv_perf_caps caps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) unsigned domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) unsigned long hret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) u64 ct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) /* Not our event */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) if (event->attr.type != event->pmu->type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) /* Unused areas must be 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) if (event_get_reserved1(event) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) event_get_reserved2(event) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) event_get_reserved3(event)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) pr_devel("reserved set when forbidden 0x%llx(0x%llx) 0x%llx(0x%llx) 0x%llx(0x%llx)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) event->attr.config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) event_get_reserved1(event),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) event->attr.config1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) event_get_reserved2(event),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) event->attr.config2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) event_get_reserved3(event));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) /* no branch sampling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) if (has_branch_stack(event))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) /* offset must be 8 byte aligned */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) if (event_get_offset(event) % 8) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) pr_devel("bad alignment\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) domain = event_get_domain(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) if (domain >= HV_PERF_DOMAIN_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) pr_devel("invalid domain %d\n", domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) hret = hv_perf_caps_get(&caps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) if (hret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) pr_devel("could not get capabilities: rc=%ld\n", hret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) /* Physical domains & other lpars require extra capabilities */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) if (!caps.collect_privileged && (is_physical_domain(domain) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) (event_get_lpar(event) != event_get_lpar_max()))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) pr_devel("hv permissions disallow: is_physical_domain:%d, lpar=0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) is_physical_domain(domain),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) event_get_lpar(event));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) /* Get the initial value of the counter for this event */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) if (single_24x7_request(event, &ct)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) pr_devel("test hcall failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) (void)local64_xchg(&event->hw.prev_count, ct);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) static u64 h_24x7_get_value(struct perf_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) u64 ct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) if (single_24x7_request(event, &ct))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) /* We checked this in event init, shouldn't fail here... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) return ct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) static void update_event_count(struct perf_event *event, u64 now)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) s64 prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) prev = local64_xchg(&event->hw.prev_count, now);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) local64_add(now - prev, &event->count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) static void h_24x7_event_read(struct perf_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) u64 now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) struct hv_24x7_request_buffer *request_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) struct hv_24x7_hw *h24x7hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) int txn_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) txn_flags = __this_cpu_read(hv_24x7_txn_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) * If in a READ transaction, add this counter to the list of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) * counters to read during the next HCALL (i.e commit_txn()).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) * If not in a READ transaction, go ahead and make the HCALL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) * to read this counter by itself.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) if (txn_flags & PERF_PMU_TXN_READ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) if (__this_cpu_read(hv_24x7_txn_err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) request_buffer = (void *)get_cpu_var(hv_24x7_reqb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) ret = add_event_to_24x7_request(event, request_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) __this_cpu_write(hv_24x7_txn_err, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) * Associate the event with the HCALL request index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) * so ->commit_txn() can quickly find/update count.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) i = request_buffer->num_requests - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) h24x7hw = &get_cpu_var(hv_24x7_hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) h24x7hw->events[i] = event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) put_cpu_var(h24x7hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) put_cpu_var(hv_24x7_reqb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) now = h_24x7_get_value(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) update_event_count(event, now);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) static void h_24x7_event_start(struct perf_event *event, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) if (flags & PERF_EF_RELOAD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) local64_set(&event->hw.prev_count, h_24x7_get_value(event));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) static void h_24x7_event_stop(struct perf_event *event, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) h_24x7_event_read(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) static int h_24x7_event_add(struct perf_event *event, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) if (flags & PERF_EF_START)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) h_24x7_event_start(event, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) * 24x7 counters only support READ transactions. They are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) * always counting and dont need/support ADD transactions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) * Cache the flags, but otherwise ignore transactions that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) * are not PERF_PMU_TXN_READ.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) static void h_24x7_event_start_txn(struct pmu *pmu, unsigned int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) struct hv_24x7_request_buffer *request_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) struct hv_24x7_data_result_buffer *result_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) /* We should not be called if we are already in a txn */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) WARN_ON_ONCE(__this_cpu_read(hv_24x7_txn_flags));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) __this_cpu_write(hv_24x7_txn_flags, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) if (flags & ~PERF_PMU_TXN_READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) request_buffer = (void *)get_cpu_var(hv_24x7_reqb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) result_buffer = (void *)get_cpu_var(hv_24x7_resb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) init_24x7_request(request_buffer, result_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) put_cpu_var(hv_24x7_resb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) put_cpu_var(hv_24x7_reqb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) * Clean up transaction state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) * NOTE: Ignore state of request and result buffers for now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) * We will initialize them during the next read/txn.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) static void reset_txn(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) __this_cpu_write(hv_24x7_txn_flags, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) __this_cpu_write(hv_24x7_txn_err, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) * 24x7 counters only support READ transactions. They are always counting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) * and dont need/support ADD transactions. Clear ->txn_flags but otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) * ignore transactions that are not of type PERF_PMU_TXN_READ.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) * For READ transactions, submit all pending 24x7 requests (i.e requests
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) * that were queued by h_24x7_event_read()), to the hypervisor and update
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) * the event counts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) static int h_24x7_event_commit_txn(struct pmu *pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) struct hv_24x7_request_buffer *request_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) struct hv_24x7_data_result_buffer *result_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) struct hv_24x7_result *res, *next_res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) u64 count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) int i, ret, txn_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) struct hv_24x7_hw *h24x7hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) txn_flags = __this_cpu_read(hv_24x7_txn_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) WARN_ON_ONCE(!txn_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) if (txn_flags & ~PERF_PMU_TXN_READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) ret = __this_cpu_read(hv_24x7_txn_err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) request_buffer = (void *)get_cpu_var(hv_24x7_reqb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) result_buffer = (void *)get_cpu_var(hv_24x7_resb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) ret = make_24x7_request(request_buffer, result_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) goto put_reqb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) h24x7hw = &get_cpu_var(hv_24x7_hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) /* Go through results in the result buffer to update event counts. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) for (i = 0, res = result_buffer->results;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) i < result_buffer->num_results; i++, res = next_res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) struct perf_event *event = h24x7hw->events[res->result_ix];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) ret = get_count_from_result(event, result_buffer, res, &count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) &next_res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) update_event_count(event, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) put_cpu_var(hv_24x7_hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) put_reqb:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) put_cpu_var(hv_24x7_resb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) put_cpu_var(hv_24x7_reqb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) reset_txn();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) * 24x7 counters only support READ transactions. They are always counting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) * and dont need/support ADD transactions. However, regardless of type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) * of transaction, all we need to do is cleanup, so we don't have to check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) * the type of transaction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) static void h_24x7_event_cancel_txn(struct pmu *pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) WARN_ON_ONCE(!__this_cpu_read(hv_24x7_txn_flags));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) reset_txn();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) static struct pmu h_24x7_pmu = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) .task_ctx_nr = perf_invalid_context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) .name = "hv_24x7",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) .attr_groups = attr_groups,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) .event_init = h_24x7_event_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) .add = h_24x7_event_add,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) .del = h_24x7_event_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) .start = h_24x7_event_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) .stop = h_24x7_event_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) .read = h_24x7_event_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) .start_txn = h_24x7_event_start_txn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) .commit_txn = h_24x7_event_commit_txn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) .cancel_txn = h_24x7_event_cancel_txn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) static int ppc_hv_24x7_cpu_online(unsigned int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) if (cpumask_empty(&hv_24x7_cpumask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) cpumask_set_cpu(cpu, &hv_24x7_cpumask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) static int ppc_hv_24x7_cpu_offline(unsigned int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) int target;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) /* Check if exiting cpu is used for collecting 24x7 events */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) if (!cpumask_test_and_clear_cpu(cpu, &hv_24x7_cpumask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) /* Find a new cpu to collect 24x7 events */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) target = cpumask_last(cpu_active_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) if (target < 0 || target >= nr_cpu_ids) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) pr_err("hv_24x7: CPU hotplug init failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) /* Migrate 24x7 events to the new target */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) cpumask_set_cpu(target, &hv_24x7_cpumask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) perf_pmu_migrate_context(&h_24x7_pmu, cpu, target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) static int hv_24x7_cpu_hotplug_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) return cpuhp_setup_state(CPUHP_AP_PERF_POWERPC_HV_24x7_ONLINE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) "perf/powerpc/hv_24x7:online",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) ppc_hv_24x7_cpu_online,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) ppc_hv_24x7_cpu_offline);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) static int hv_24x7_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) unsigned long hret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) struct hv_perf_caps caps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) if (!firmware_has_feature(FW_FEATURE_LPAR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) pr_debug("not a virtualized system, not enabling\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) } else if (!cur_cpu_spec->oprofile_cpu_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) /* POWER8 only supports v1, while POWER9 only supports v2. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) if (!strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power8"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) interface_version = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) interface_version = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) /* SMT8 in POWER9 needs to aggregate result elements. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) if (threads_per_core == 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) aggregate_result_elements = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) hret = hv_perf_caps_get(&caps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) if (hret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) pr_debug("could not obtain capabilities, not enabling, rc=%ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) hret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) hv_page_cache = kmem_cache_create("hv-page-4096", 4096, 4096, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) if (!hv_page_cache)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) /* sampling not supported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) h_24x7_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) r = create_events_from_catalog(&event_group.attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) &event_desc_group.attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) &event_long_desc_group.attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) /* init cpuhotplug */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) r = hv_24x7_cpu_hotplug_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) r = perf_pmu_register(&h_24x7_pmu, h_24x7_pmu.name, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) read_24x7_sys_info();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) device_initcall(hv_24x7_init);