Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5/5B/5 Plus boards

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Hypervisor supplied "gpci" ("get performance counter info") performance
 * counter support
 *
 * Author: Cody P Schafer <cody@linux.vnet.ibm.com>
 * Copyright 2014 IBM Corporation.
 */

#define pr_fmt(fmt) "hv-gpci: " fmt

#include <linux/init.h>
#include <linux/perf_event.h>
#include <asm/firmware.h>
#include <asm/hvcall.h>
#include <asm/io.h>

#include "hv-gpci.h"
#include "hv-common.h"

/*
 * Example usage:
 *  perf stat -e 'hv_gpci/counter_info_version=3,offset=0,length=8,
 *		  secondary_index=0,starting_index=0xffffffff,request=0x10/' ...
 */

/* u32 */
EVENT_DEFINE_RANGE_FORMAT(request, config, 0, 31);
/* u32 */
/*
 * Note that starting_index, phys_processor_idx, sibling_part_id,
 * hw_chip_id, partition_id all refer to the same bit range. They
 * are basically aliases for the starting_index. The specific alias
 * used depends on the event. See REQUEST_IDX_KIND in hv-gpci-requests.h
 */
EVENT_DEFINE_RANGE_FORMAT(starting_index, config, 32, 63);
EVENT_DEFINE_RANGE_FORMAT_LITE(phys_processor_idx, config, 32, 63);
EVENT_DEFINE_RANGE_FORMAT_LITE(sibling_part_id, config, 32, 63);
EVENT_DEFINE_RANGE_FORMAT_LITE(hw_chip_id, config, 32, 63);
EVENT_DEFINE_RANGE_FORMAT_LITE(partition_id, config, 32, 63);

/* u16 */
EVENT_DEFINE_RANGE_FORMAT(secondary_index, config1, 0, 15);
/* u8 */
EVENT_DEFINE_RANGE_FORMAT(counter_info_version, config1, 16, 23);
/* u8, bytes of data (1-8) */
EVENT_DEFINE_RANGE_FORMAT(length, config1, 24, 31);
/* u32, byte offset */
EVENT_DEFINE_RANGE_FORMAT(offset, config1, 32, 63);
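/*
 * Summary of the encoding defined above (bit 0 is the least significant
 * bit of the given attr field, matching the sysfs format strings):
 *   config:   0-31 request,          32-63 starting_index (and aliases)
 *   config1:  0-15 secondary_index,  16-23 counter_info_version,
 *            24-31 length (bytes),   32-63 offset into the returned data
 */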

static cpumask_t hv_gpci_cpumask;

static struct attribute *format_attrs[] = {
	&format_attr_request.attr,
	&format_attr_starting_index.attr,
	&format_attr_phys_processor_idx.attr,
	&format_attr_sibling_part_id.attr,
	&format_attr_hw_chip_id.attr,
	&format_attr_partition_id.attr,
	&format_attr_secondary_index.attr,
	&format_attr_counter_info_version.attr,

	&format_attr_offset.attr,
	&format_attr_length.attr,
	NULL,
};

static struct attribute_group format_group = {
	.name = "format",
	.attrs = format_attrs,
};

static struct attribute_group event_group = {
	.name  = "events",
	.attrs = hv_gpci_event_attrs,
};

#define HV_CAPS_ATTR(_name, _format)				\
static ssize_t _name##_show(struct device *dev,			\
			    struct device_attribute *attr,	\
			    char *page)				\
{								\
	struct hv_perf_caps caps;				\
	unsigned long hret = hv_perf_caps_get(&caps);		\
	if (hret)						\
		return -EIO;					\
								\
	return sprintf(page, _format, caps._name);		\
}								\
static struct device_attribute hv_caps_attr_##_name = __ATTR_RO(_name)

static ssize_t kernel_version_show(struct device *dev,
				   struct device_attribute *attr,
				   char *page)
{
	return sprintf(page, "0x%x\n", COUNTER_INFO_VERSION_CURRENT);
}

static ssize_t cpumask_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &hv_gpci_cpumask);
}

static DEVICE_ATTR_RO(kernel_version);
static DEVICE_ATTR_RO(cpumask);

HV_CAPS_ATTR(version, "0x%x\n");
HV_CAPS_ATTR(ga, "%d\n");
HV_CAPS_ATTR(expanded, "%d\n");
HV_CAPS_ATTR(lab, "%d\n");
HV_CAPS_ATTR(collect_privileged, "%d\n");

static struct attribute *interface_attrs[] = {
	&dev_attr_kernel_version.attr,
	&hv_caps_attr_version.attr,
	&hv_caps_attr_ga.attr,
	&hv_caps_attr_expanded.attr,
	&hv_caps_attr_lab.attr,
	&hv_caps_attr_collect_privileged.attr,
	NULL,
};

static struct attribute *cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group cpumask_attr_group = {
	.attrs = cpumask_attrs,
};

static struct attribute_group interface_group = {
	.name = "interface",
	.attrs = interface_attrs,
};

static const struct attribute_group *attr_groups[] = {
	&format_group,
	&event_group,
	&interface_group,
	&cpumask_attr_group,
	NULL,
};

static DEFINE_PER_CPU(char, hv_gpci_reqb[HGPCI_REQ_BUFFER_SIZE]) __aligned(sizeof(uint64_t));

static unsigned long single_gpci_request(u32 req, u32 starting_index,
		u16 secondary_index, u8 version_in, u32 offset, u8 length,
		u64 *value)
{
	unsigned long ret;
	size_t i;
	u64 count;
	struct hv_gpci_request_buffer *arg;

	arg = (void *)get_cpu_var(hv_gpci_reqb);
	memset(arg, 0, HGPCI_REQ_BUFFER_SIZE);

	arg->params.counter_request = cpu_to_be32(req);
	arg->params.starting_index = cpu_to_be32(starting_index);
	arg->params.secondary_index = cpu_to_be16(secondary_index);
	arg->params.counter_info_version_in = version_in;

	ret = plpar_hcall_norets(H_GET_PERF_COUNTER_INFO,
			virt_to_phys(arg), HGPCI_REQ_BUFFER_SIZE);
	if (ret) {
		pr_devel("hcall failed: 0x%lx\n", ret);
		goto out;
	}

	/*
	 * we verify offset and length are within the zeroed buffer at event
	 * init.
	 */
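	/*
	 * Assemble the 'length' bytes starting at 'offset' into a single
	 * value, treating the returned data as big-endian: the byte at
	 * 'offset' becomes the most significant byte of the result.
	 */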
	count = 0;
	for (i = offset; i < offset + length; i++)
		count |= (u64)(arg->bytes[i]) << ((length - 1 - (i - offset)) * 8);

	*value = count;
out:
	put_cpu_var(hv_gpci_reqb);
	return ret;
}

static u64 h_gpci_get_value(struct perf_event *event)
{
	u64 count;
	unsigned long ret = single_gpci_request(event_get_request(event),
					event_get_starting_index(event),
					event_get_secondary_index(event),
					event_get_counter_info_version(event),
					event_get_offset(event),
					event_get_length(event),
					&count);
	if (ret)
		return 0;
	return count;
}

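/*
 * Re-read the counter from the hypervisor and fold the delta since the
 * previous reading into the perf event count.
 */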
static void h_gpci_event_update(struct perf_event *event)
{
	s64 prev;
	u64 now = h_gpci_get_value(event);
	prev = local64_xchg(&event->hw.prev_count, now);
	local64_add(now - prev, &event->count);
}

static void h_gpci_event_start(struct perf_event *event, int flags)
{
	local64_set(&event->hw.prev_count, h_gpci_get_value(event));
}

static void h_gpci_event_stop(struct perf_event *event, int flags)
{
	h_gpci_event_update(event);
}

static int h_gpci_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		h_gpci_event_start(event, flags);

	return 0;
}

static int h_gpci_event_init(struct perf_event *event)
{
	u64 count;
	u8 length;

	/* Not our event */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* config2 is unused */
	if (event->attr.config2) {
		pr_devel("config2 set when reserved\n");
		return -EINVAL;
	}

	/* no branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	length = event_get_length(event);
	if (length < 1 || length > 8) {
		pr_devel("length invalid\n");
		return -EINVAL;
	}

	/* last byte within the buffer? */
	if ((event_get_offset(event) + length) > HGPCI_MAX_DATA_BYTES) {
		pr_devel("request outside of buffer: %zu > %zu\n",
				(size_t)event_get_offset(event) + length,
				HGPCI_MAX_DATA_BYTES);
		return -EINVAL;
	}

	/* check if the request works... */
	if (single_gpci_request(event_get_request(event),
				event_get_starting_index(event),
				event_get_secondary_index(event),
				event_get_counter_info_version(event),
				event_get_offset(event),
				length,
				&count)) {
		pr_devel("gpci hcall failed\n");
		return -EINVAL;
	}

	return 0;
}

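/*
 * System-wide PMU: events have no task context and are read synchronously
 * via the H_GET_PERF_COUNTER_INFO hcall; interrupt-driven sampling is not
 * supported (PERF_PMU_CAP_NO_INTERRUPT is set in hv_gpci_init()).
 */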
static struct pmu h_gpci_pmu = {
	.task_ctx_nr = perf_invalid_context,

	.name = "hv_gpci",
	.attr_groups = attr_groups,
	.event_init  = h_gpci_event_init,
	.add         = h_gpci_event_add,
	.del         = h_gpci_event_stop,
	.start       = h_gpci_event_start,
	.stop        = h_gpci_event_stop,
	.read        = h_gpci_event_update,
	.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};

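/*
 * hv_gpci events are counted on a single CPU at a time, exported to
 * userspace through the "cpumask" sysfs attribute: the first CPU to come
 * online claims the mask, and if that CPU later goes offline the events
 * are migrated to another active CPU.
 */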
static int ppc_hv_gpci_cpu_online(unsigned int cpu)
{
	if (cpumask_empty(&hv_gpci_cpumask))
		cpumask_set_cpu(cpu, &hv_gpci_cpumask);

	return 0;
}

static int ppc_hv_gpci_cpu_offline(unsigned int cpu)
{
	int target;

	/* Check if exiting cpu is used for collecting gpci events */
	if (!cpumask_test_and_clear_cpu(cpu, &hv_gpci_cpumask))
		return 0;

	/* Find a new cpu to collect gpci events */
	target = cpumask_last(cpu_active_mask);

	if (target < 0 || target >= nr_cpu_ids) {
		pr_err("hv_gpci: CPU hotplug init failed\n");
		return -1;
	}

	/* Migrate gpci events to the new target */
	cpumask_set_cpu(target, &hv_gpci_cpumask);
	perf_pmu_migrate_context(&h_gpci_pmu, cpu, target);

	return 0;
}

static int hv_gpci_cpu_hotplug_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_PERF_POWERPC_HV_GPCI_ONLINE,
			  "perf/powerpc/hv_gcpi:online",
			  ppc_hv_gpci_cpu_online,
			  ppc_hv_gpci_cpu_offline);
}

static int hv_gpci_init(void)
{
	int r;
	unsigned long hret;
	struct hv_perf_caps caps;

	hv_gpci_assert_offsets_correct();

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		pr_debug("not a virtualized system, not enabling\n");
		return -ENODEV;
	}

	hret = hv_perf_caps_get(&caps);
	if (hret) {
		pr_debug("could not obtain capabilities, not enabling, rc=%ld\n",
				hret);
		return -ENODEV;
	}

	/* init cpuhotplug */
	r = hv_gpci_cpu_hotplug_init();
	if (r)
		return r;

	/* sampling not supported */
	h_gpci_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;

	r = perf_pmu_register(&h_gpci_pmu, h_gpci_pmu.name, -1);
	if (r)
		return r;

	return 0;
}

device_initcall(hv_gpci_init);
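
For reference, the sketch below shows how the format fields defined in this file map onto a raw perf_event_open() call from userspace, mirroring the perf stat example in the header comment (request=0x10, starting_index=0xffffffff, counter_info_version=3, length=8, offset=0, secondary_index=0). It is an illustrative program, not part of the kernel source above; it assumes a pseries guest where the hv_gpci PMU has registered, sysfs is mounted, and the caller has sufficient privilege for a CPU-wide event.

/* Minimal userspace sketch: open and read one hv_gpci counter. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	unsigned int type;
	uint64_t value;
	FILE *f;
	int fd;

	/* Dynamic PMU type published by perf_pmu_register() above. */
	f = fopen("/sys/bus/event_source/devices/hv_gpci/type", "r");
	if (!f)
		return 1;
	if (fscanf(f, "%u", &type) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = type;
	/* config:  request in bits 0-31, starting_index in bits 32-63 */
	attr.config = 0x10ULL | (0xffffffffULL << 32);
	/* config1: secondary_index 0-15, counter_info_version 16-23,
	 *          length (bytes) 24-31, offset 32-63 */
	attr.config1 = (3ULL << 16) | (8ULL << 24);

	/* System-wide PMU: pid must be -1 and a CPU must be given. */
	fd = perf_event_open(&attr, -1, 0, -1, 0);
	if (fd < 0)
		return 1;
	if (read(fd, &value, sizeof(value)) != sizeof(value)) {
		close(fd);
		return 1;
	}
	printf("counter value: %llu\n", (unsigned long long)value);
	close(fd);
	return 0;
}

Passing the CPU listed in /sys/bus/event_source/devices/hv_gpci/cpumask instead of CPU 0 matches what the perf tool itself does for this kind of uncore-style PMU.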