// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPPC (Collaborative Processor Performance Control) methods used by CPUfreq drivers.
 *
 * (C) Copyright 2014, 2015 Linaro Ltd.
 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
 *
 * CPPC describes a few methods for controlling CPU performance using
 * information from a per CPU table called CPC. This table is described in
 * the ACPI v5.0+ specification. The table consists of a list of
 * registers which may be memory mapped or hardware registers and also may
 * include some static integer values.
 *
 * CPU performance is on an abstract continuous scale, as opposed to a
 * discretized P-state scale which is tied to CPU frequency only. In brief,
 * the basic operation involves:
 *
 * - OS makes a CPU performance request (can provide min and max bounds).
 *
 * - Platform (such as BMC) is free to optimize the request within the
 *   requested bounds depending on power/thermal budgets etc.
 *
 * - Platform conveys its decision back to OS.
 *
 * The communication between OS and platform occurs through another medium
 * called the Platform Communication Channel (PCC). This is a generic
 * mailbox-like mechanism which includes doorbell semantics to indicate
 * register updates. See drivers/mailbox/pcc.c for details on PCC.
 *
 * Finer details about the PCC and CPPC specs are available in the ACPI
 * v5.1 and later specifications.
 */
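
/*
 * A rough usage sketch from a CPUfreq driver's point of view (the real
 * call sites live in drivers/cpufreq/cppc_cpufreq.c; the values below are
 * purely illustrative):
 *
 *	struct cppc_perf_caps caps;
 *	struct cppc_perf_ctrls ctrls = {0};
 *
 *	cppc_get_perf_caps(cpu, &caps);		// read highest/nominal/lowest
 *	ctrls.desired_perf = caps.nominal_perf;	// pick a target level
 *	cppc_set_perf(cpu, &ctrls);		// hand the request to the platform
 */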

#define pr_fmt(fmt)	"ACPI CPPC: " fmt

#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/ktime.h>
#include <linux/rwsem.h>
#include <linux/wait.h>

#include <acpi/cppc_acpi.h>

struct cppc_pcc_data {
	struct mbox_chan *pcc_channel;
	void __iomem *pcc_comm_addr;
	bool pcc_channel_acquired;
	unsigned int deadline_us;
	unsigned int pcc_mpar, pcc_mrtt, pcc_nominal;

	bool pending_pcc_write_cmd;	/* Any pending/batched PCC write cmds? */
	bool platform_owns_pcc;		/* Ownership of PCC subspace */
	unsigned int pcc_write_cnt;	/* Running count of PCC write commands */

	/*
	 * Lock to provide controlled access to the PCC channel.
	 *
	 * For performance-critical usecases (currently cppc_set_perf), we
	 * need to take the read lock and check whether the channel belongs
	 * to the OSPM before reading from or writing to the PCC subspace.
	 * We need to take the write lock before transferring channel
	 * ownership to the platform via a doorbell. This allows us to batch
	 * a number of CPPC requests if they happen to originate at about
	 * the same time.
	 *
	 * For non-performance-critical usecases (init), take the write
	 * lock for all purposes, which gives exclusive access.
	 */
	struct rw_semaphore pcc_lock;

	/* Wait queue for CPUs whose requests were batched */
	wait_queue_head_t pcc_write_wait_q;
	ktime_t last_cmd_cmpl_time;
	ktime_t last_mpar_reset;
	int mpar_count;
	int refcount;
};
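
/*
 * Illustrative locking pattern for the pcc_lock described above (a sketch
 * only; the real logic lives in cppc_set_perf() and send_pcc_cmd(), and
 * error handling is elided here):
 *
 *	down_read(&pcc_ss_data->pcc_lock);
 *	if (pcc_ss_data->platform_owns_pcc)
 *		check_pcc_chan(pcc_ss_id, false);	// wait for ownership
 *	// ... stage the desired perf value in the PCC subspace ...
 *	up_read(&pcc_ss_data->pcc_lock);
 *
 *	// Later, a single CPU takes the write lock to ring the doorbell,
 *	// flushing every request staged since the last command:
 *	down_write(&pcc_ss_data->pcc_lock);
 *	send_pcc_cmd(pcc_ss_id, CMD_WRITE);
 *	up_write(&pcc_ss_data->pcc_lock);
 */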

/* Array to represent the PCC channel per subspace ID */
static struct cppc_pcc_data *pcc_data[MAX_PCC_SUBSPACES];

/* The cpu_pcc_subspace_idx contains per CPU subspace ID */
static DEFINE_PER_CPU(int, cpu_pcc_subspace_idx);

/*
 * The cpc_desc structure contains the ACPI register details
 * as described in the per CPU _CPC tables. The details
 * include the type of register (e.g. PCC, System IO, FFH etc.)
 * and destination addresses which let us READ/WRITE CPU performance
 * information using the appropriate I/O methods.
 */
static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);

/* pcc mapped address + header size + offset within PCC subspace */
#define GET_PCC_VADDR(offs, pcc_ss_id) (pcc_data[pcc_ss_id]->pcc_comm_addr + \
					0x8 + (offs))
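
/*
 * For example (hypothetical offsets): a register whose _CPC entry places
 * it at offset 0x10 inside PCC subspace 1 is accessed at
 * GET_PCC_VADDR(0x10, 1) == pcc_data[1]->pcc_comm_addr + 0x8 + 0x10,
 * i.e. just past the 8-byte PCC subspace header.
 */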

/* Check if a CPC register is in PCC */
#define CPC_IN_PCC(cpc) ((cpc)->type == ACPI_TYPE_BUFFER &&		\
				(cpc)->cpc_entry.reg.space_id ==	\
				ACPI_ADR_SPACE_PLATFORM_COMM)

/* Evaluates to True if reg is a NULL register descriptor */
#define IS_NULL_REG(reg) ((reg)->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY && \
				(reg)->address == 0 &&			\
				(reg)->bit_width == 0 &&		\
				(reg)->bit_offset == 0 &&		\
				(reg)->access_width == 0)

/* Evaluates to True if an optional cpc field is supported */
#define CPC_SUPPORTED(cpc) ((cpc)->type == ACPI_TYPE_INTEGER ?		\
				!!(cpc)->cpc_entry.int_value :		\
				!IS_NULL_REG(&(cpc)->cpc_entry.reg))
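
/*
 * E.g. an optional register encoded as Register(SystemMemory, 0, 0, 0, 0)
 * in the _CPC package (see the sample table further below) is a NULL
 * descriptor, so CPC_SUPPORTED() evaluates to false for it.
 */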

/*
 * Arbitrary retries in case the remote processor is slow to respond
 * to PCC commands. Keep it high enough to cover emulators where
 * the processors run painfully slow.
 */
#define NUM_RETRIES 500ULL
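
/*
 * Worked example (hypothetical PCCT contents): a subspace advertising a
 * nominal latency of 100 us yields a polling deadline of
 * 500 * 100 us = 50 ms; see the deadline_us computation in
 * register_pcc_channel().
 */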

#define define_one_cppc_ro(_name)		\
static struct kobj_attribute _name =		\
__ATTR(_name, 0444, show_##_name, NULL)

#define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj)

#define show_cppc_data(access_fn, struct_name, member_name)		\
	static ssize_t show_##member_name(struct kobject *kobj,		\
				struct kobj_attribute *attr, char *buf)	\
	{								\
		struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);		\
		struct struct_name st_name = {0};			\
		int ret;						\
									\
		ret = access_fn(cpc_ptr->cpu_id, &st_name);		\
		if (ret)						\
			return ret;					\
									\
		return scnprintf(buf, PAGE_SIZE, "%llu\n",		\
				(u64)st_name.member_name);		\
	}								\
	define_one_cppc_ro(member_name)
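
/*
 * For instance, show_cppc_data(cppc_get_perf_caps, cppc_perf_caps,
 * highest_perf) expands (roughly) to a show_highest_perf() sysfs handler
 * that calls cppc_get_perf_caps(cpc_ptr->cpu_id, &st_name) and prints
 * st_name.highest_perf, followed by the matching read-only kobj_attribute
 * definition via define_one_cppc_ro(highest_perf).
 */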

show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, highest_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_nonlinear_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_freq);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_freq);

show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf);
show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);

static ssize_t show_feedback_ctrs(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
	struct cppc_perf_fb_ctrs fb_ctrs = {0};
	int ret;

	ret = cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs);
	if (ret)
		return ret;

	return scnprintf(buf, PAGE_SIZE, "ref:%llu del:%llu\n",
			fb_ctrs.reference, fb_ctrs.delivered);
}
define_one_cppc_ro(feedback_ctrs);

static struct attribute *cppc_attrs[] = {
	&feedback_ctrs.attr,
	&reference_perf.attr,
	&wraparound_time.attr,
	&highest_perf.attr,
	&lowest_perf.attr,
	&lowest_nonlinear_perf.attr,
	&nominal_perf.attr,
	&nominal_freq.attr,
	&lowest_freq.attr,
	NULL
};

static struct kobj_type cppc_ktype = {
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = cppc_attrs,
};
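
/*
 * The resulting sysfs layout (illustrative; the "acpi_cppc" kobject is
 * registered under each CPU device later in this file):
 *
 *	/sys/devices/system/cpu/cpuN/acpi_cppc/feedback_ctrs
 *	/sys/devices/system/cpu/cpuN/acpi_cppc/highest_perf
 *	/sys/devices/system/cpu/cpuN/acpi_cppc/lowest_perf
 *	...
 */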

static int check_pcc_chan(int pcc_ss_id, bool chk_err_bit)
{
	int ret, status;
	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
	struct acpi_pcct_shared_memory __iomem *generic_comm_base =
		pcc_ss_data->pcc_comm_addr;

	if (!pcc_ss_data->platform_owns_pcc)
		return 0;

	/*
	 * Poll the PCC status register every 3 us (delay_us) for a maximum
	 * of deadline_us (timeout_us) until the PCC command-complete bit is
	 * set (cond).
	 */
	ret = readw_relaxed_poll_timeout(&generic_comm_base->status, status,
					 status & PCC_CMD_COMPLETE_MASK, 3,
					 pcc_ss_data->deadline_us);

	if (likely(!ret)) {
		pcc_ss_data->platform_owns_pcc = false;
		if (chk_err_bit && (status & PCC_ERROR_MASK))
			ret = -EIO;
	}

	if (unlikely(ret))
		pr_err("PCC check channel failed for ss: %d. ret=%d\n",
		       pcc_ss_id, ret);

	return ret;
}

/*
 * This function transfers the ownership of the PCC to the platform,
 * so it must be called while holding write_lock(pcc_lock).
 */
static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
{
	int ret = -EIO, i;
	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
	struct acpi_pcct_shared_memory *generic_comm_base =
		(struct acpi_pcct_shared_memory *)pcc_ss_data->pcc_comm_addr;
	unsigned int time_delta;

	/*
	 * For CMD_WRITE we know for a fact the caller should have checked
	 * the channel before writing to PCC space.
	 */
	if (cmd == CMD_READ) {
		/*
		 * If there are pending cpc_writes, then we stole the channel
		 * before write completion, so first send a WRITE command to
		 * the platform.
		 */
		if (pcc_ss_data->pending_pcc_write_cmd)
			send_pcc_cmd(pcc_ss_id, CMD_WRITE);

		ret = check_pcc_chan(pcc_ss_id, false);
		if (ret)
			goto end;
	} else /* CMD_WRITE */
		pcc_ss_data->pending_pcc_write_cmd = FALSE;

	/*
	 * Handle the Minimum Request Turnaround Time (MRTT):
	 * "The minimum amount of time that OSPM must wait after the completion
	 * of a command before issuing the next command, in microseconds."
	 */
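	/*
	 * E.g. (hypothetical numbers): with pcc_mrtt == 1000 and only 400 us
	 * elapsed since last_cmd_cmpl_time, the code below busy-waits for the
	 * remaining 600 us before touching the channel again.
	 */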
	if (pcc_ss_data->pcc_mrtt) {
		time_delta = ktime_us_delta(ktime_get(),
					    pcc_ss_data->last_cmd_cmpl_time);
		if (pcc_ss_data->pcc_mrtt > time_delta)
			udelay(pcc_ss_data->pcc_mrtt - time_delta);
	}

	/*
	 * Handle the non-zero Maximum Periodic Access Rate (MPAR):
	 * "The maximum number of periodic requests that the subspace channel can
	 * support, reported in commands per minute. 0 indicates no limitation."
	 *
	 * This parameter should ideally be zero or large enough so that it can
	 * handle the maximum number of requests that all the cores in the system
	 * can collectively generate. If it is not, we will follow the spec and
	 * just not send the request to the platform after hitting the MPAR limit
	 * in any 60s window.
	 */
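	/*
	 * Worked example (hypothetical value): with pcc_mpar == 600,
	 * mpar_count is recharged to 600 at most once per 60 s window, so at
	 * most 600 doorbell rings go out per minute; the 601st request inside
	 * the same window is rejected below with -EIO.
	 */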
	if (pcc_ss_data->pcc_mpar) {
		if (pcc_ss_data->mpar_count == 0) {
			time_delta = ktime_ms_delta(ktime_get(),
						    pcc_ss_data->last_mpar_reset);
			if ((time_delta < 60 * MSEC_PER_SEC) && pcc_ss_data->last_mpar_reset) {
				pr_debug("PCC cmd for subspace %d not sent due to MPAR limit\n",
					 pcc_ss_id);
				ret = -EIO;
				goto end;
			}
			pcc_ss_data->last_mpar_reset = ktime_get();
			pcc_ss_data->mpar_count = pcc_ss_data->pcc_mpar;
		}
		pcc_ss_data->mpar_count--;
	}

	/* Write to the shared comm region. */
	writew_relaxed(cmd, &generic_comm_base->command);

	/* Flip CMD COMPLETE bit */
	writew_relaxed(0, &generic_comm_base->status);

	pcc_ss_data->platform_owns_pcc = true;

	/* Ring doorbell */
	ret = mbox_send_message(pcc_ss_data->pcc_channel, &cmd);
	if (ret < 0) {
		pr_err("Err sending PCC mbox message. ss: %d cmd:%d, ret:%d\n",
		       pcc_ss_id, cmd, ret);
		goto end;
	}

	/* Wait for completion and check for PCC error bit. */
	ret = check_pcc_chan(pcc_ss_id, true);

	if (pcc_ss_data->pcc_mrtt)
		pcc_ss_data->last_cmd_cmpl_time = ktime_get();

	if (pcc_ss_data->pcc_channel->mbox->txdone_irq)
		mbox_chan_txdone(pcc_ss_data->pcc_channel, ret);
	else
		mbox_client_txdone(pcc_ss_data->pcc_channel, ret);

end:
	if (cmd == CMD_WRITE) {
		if (unlikely(ret)) {
			for_each_possible_cpu(i) {
				struct cpc_desc *desc = per_cpu(cpc_desc_ptr, i);

				if (!desc)
					continue;

				if (desc->write_cmd_id == pcc_ss_data->pcc_write_cnt)
					desc->write_cmd_status = ret;
			}
		}
		pcc_ss_data->pcc_write_cnt++;
		wake_up_all(&pcc_ss_data->pcc_write_wait_q);
	}

	return ret;
}

static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
{
	if (ret < 0)
		pr_debug("TX did not complete: CMD sent:%x, ret:%d\n",
			 *(u16 *)msg, ret);
	else
		pr_debug("TX completed. CMD sent:%x, ret:%d\n",
			 *(u16 *)msg, ret);
}

static struct mbox_client cppc_mbox_cl = {
	.tx_done = cppc_chan_tx_done,
	.knows_txdone = true,
};

static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
{
	int result = -EFAULT;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
	struct acpi_buffer state = {0, NULL};
	union acpi_object *psd = NULL;
	struct acpi_psd_package *pdomain;

	status = acpi_evaluate_object_typed(handle, "_PSD", NULL,
					    &buffer, ACPI_TYPE_PACKAGE);
	if (status == AE_NOT_FOUND)	/* _PSD is optional */
		return 0;
	if (ACPI_FAILURE(status))
		return -ENODEV;

	psd = buffer.pointer;
	if (!psd || psd->package.count != 1) {
		pr_debug("Invalid _PSD data\n");
		goto end;
	}

	pdomain = &(cpc_ptr->domain_info);

	state.length = sizeof(struct acpi_psd_package);
	state.pointer = pdomain;

	status = acpi_extract_package(&(psd->package.elements[0]),
				      &format, &state);
	if (ACPI_FAILURE(status)) {
		pr_debug("Invalid _PSD data for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
		pr_debug("Unknown _PSD:num_entries for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
		pr_debug("Unknown _PSD:revision for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
		pr_debug("Invalid _PSD:coord_type for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	result = 0;
end:
	kfree(buffer.pointer);
	return result;
}
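
/*
 * For reference, a _PSD package matching the checks above looks like this
 * (illustrative values; 0xFD selects SW_ANY coordination):
 *
 *	Name(_PSD, Package() {
 *		Package() {
 *			5,	// NumEntries
 *			0,	// Revision
 *			0,	// Domain
 *			0xFD,	// CoordType
 *			4	// NumProcessors
 *		}
 *	})
 */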

/**
 * acpi_get_psd_map - Map the CPUs in a common freq domain.
 * @all_cpu_data: Ptrs to CPU specific CPPC data including PSD info.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_get_psd_map(struct cppc_cpudata **all_cpu_data)
{
	int count_target;
	int retval = 0;
	unsigned int i, j;
	cpumask_var_t covered_cpus;
	struct cppc_cpudata *pr, *match_pr;
	struct acpi_psd_package *pdomain;
	struct acpi_psd_package *match_pdomain;
	struct cpc_desc *cpc_ptr, *match_cpc_ptr;

	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
		return -ENOMEM;

	/*
	 * Now that we have _PSD data from all CPUs, let's set up P-state
	 * domain info.
	 */
	for_each_possible_cpu(i) {
		if (cpumask_test_cpu(i, covered_cpus))
			continue;

		pr = all_cpu_data[i];
		cpc_ptr = per_cpu(cpc_desc_ptr, i);
		if (!cpc_ptr) {
			retval = -EFAULT;
			goto err_ret;
		}

		pdomain = &(cpc_ptr->domain_info);
		cpumask_set_cpu(i, pr->shared_cpu_map);
		cpumask_set_cpu(i, covered_cpus);
		if (pdomain->num_processors <= 1)
			continue;

		/* Validate the Domain info */
		count_target = pdomain->num_processors;
		if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
			pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
			pr->shared_type = CPUFREQ_SHARED_TYPE_HW;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
			pr->shared_type = CPUFREQ_SHARED_TYPE_ANY;

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
			if (!match_cpc_ptr) {
				retval = -EFAULT;
				goto err_ret;
			}

			match_pdomain = &(match_cpc_ptr->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/* Here i and j are in the same domain. */
			if (match_pdomain->num_processors != count_target) {
				retval = -EFAULT;
				goto err_ret;
			}

			if (pdomain->coord_type != match_pdomain->coord_type) {
				retval = -EFAULT;
				goto err_ret;
			}

			cpumask_set_cpu(j, covered_cpus);
			cpumask_set_cpu(j, pr->shared_cpu_map);
		}

		for_each_cpu(j, pr->shared_cpu_map) {
			if (i == j)
				continue;

			match_pr = all_cpu_data[j];
			match_pr->shared_type = pr->shared_type;
			cpumask_copy(match_pr->shared_cpu_map,
				     pr->shared_cpu_map);
		}
	}
	goto out;

err_ret:
	for_each_possible_cpu(i) {
		pr = all_cpu_data[i];

		/* Assume no coordination on any error parsing domain info */
		cpumask_clear(pr->shared_cpu_map);
		cpumask_set_cpu(i, pr->shared_cpu_map);
		pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
	}
out:
	free_cpumask_var(covered_cpus);
	return retval;
}
EXPORT_SYMBOL_GPL(acpi_get_psd_map);

static int register_pcc_channel(int pcc_ss_idx)
{
	struct acpi_pcct_hw_reduced *cppc_ss;
	u64 usecs_lat;

	if (pcc_ss_idx >= 0) {
		pcc_data[pcc_ss_idx]->pcc_channel =
			pcc_mbox_request_channel(&cppc_mbox_cl, pcc_ss_idx);

		if (IS_ERR(pcc_data[pcc_ss_idx]->pcc_channel)) {
			pr_err("Failed to find PCC channel for subspace %d\n",
			       pcc_ss_idx);
			return -ENODEV;
		}

		/*
		 * The PCC mailbox controller driver should
		 * have parsed the PCCT (global table of all
		 * PCC channels) and stored pointers to the
		 * subspace communication region in con_priv.
		 */
		cppc_ss = (pcc_data[pcc_ss_idx]->pcc_channel)->con_priv;

		if (!cppc_ss) {
			pr_err("No PCC subspace found for CPPC subspace %d\n",
			       pcc_ss_idx);
			return -ENODEV;
		}

		/*
		 * cppc_ss->latency is just a nominal value. In reality
		 * the remote processor could be much slower to reply.
		 * So add an arbitrary amount of wait on top of nominal.
		 */
		usecs_lat = NUM_RETRIES * cppc_ss->latency;
		pcc_data[pcc_ss_idx]->deadline_us = usecs_lat;
		pcc_data[pcc_ss_idx]->pcc_mrtt = cppc_ss->min_turnaround_time;
		pcc_data[pcc_ss_idx]->pcc_mpar = cppc_ss->max_access_rate;
		pcc_data[pcc_ss_idx]->pcc_nominal = cppc_ss->latency;

		pcc_data[pcc_ss_idx]->pcc_comm_addr =
			acpi_os_ioremap(cppc_ss->base_address, cppc_ss->length);
		if (!pcc_data[pcc_ss_idx]->pcc_comm_addr) {
			pr_err("Failed to ioremap PCC comm region mem for %d\n",
			       pcc_ss_idx);
			return -ENOMEM;
		}

		/* Set flag so that we don't come here for each CPU. */
		pcc_data[pcc_ss_idx]->pcc_channel_acquired = true;
	}

	return 0;
}

/**
 * cpc_ffh_supported() - check if FFH reading supported
 *
 * Check if the architecture has support for functional fixed hardware
 * read/write capability.
 *
 * Return: true for supported, false for not supported
 */
bool __weak cpc_ffh_supported(void)
{
	return false;
}

/**
 * pcc_data_alloc() - Allocate the pcc_data memory for pcc subspace
 * @pcc_ss_id: PCC subspace index.
 *
 * Check and allocate the cppc_pcc_data memory.
 * In some processor configurations it is possible that the same subspace
 * is shared between multiple CPUs. This is seen especially in CPUs
 * with hardware multi-threading support.
 *
 * Return: 0 for success, errno for failure
 */
static int pcc_data_alloc(int pcc_ss_id)
{
	if (pcc_ss_id < 0 || pcc_ss_id >= MAX_PCC_SUBSPACES)
		return -EINVAL;

	if (pcc_data[pcc_ss_id]) {
		pcc_data[pcc_ss_id]->refcount++;
	} else {
		pcc_data[pcc_ss_id] = kzalloc(sizeof(struct cppc_pcc_data),
					      GFP_KERNEL);
		if (!pcc_data[pcc_ss_id])
			return -ENOMEM;
		pcc_data[pcc_ss_id]->refcount++;
	}

	return 0;
}

/* Check if CPPC revision + num_ent combination is supported */
static bool is_cppc_supported(int revision, int num_ent)
{
	int expected_num_ent;

	switch (revision) {
	case CPPC_V2_REV:
		expected_num_ent = CPPC_V2_NUM_ENT;
		break;
	case CPPC_V3_REV:
		expected_num_ent = CPPC_V3_NUM_ENT;
		break;
	default:
		pr_debug("Firmware exports unsupported CPPC revision: %d\n",
			 revision);
		return false;
	}

	if (expected_num_ent != num_ent) {
		pr_debug("Firmware exports %d entries. Expected: %d for CPPC rev:%d\n",
			 num_ent, expected_num_ent, revision);
		return false;
	}

	return true;
}

/*
 * An example CPC table looks like the following.
 *
 *	Name(_CPC, Package() {
 *		17,							// NumEntries
 *		1,							// Revision
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x120, 2)},	// Highest Performance
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x124, 2)},	// Nominal Performance
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x128, 2)},	// Lowest Nonlinear Performance
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x12C, 2)},	// Lowest Performance
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x130, 2)},	// Guaranteed Performance Register
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x110, 2)},	// Desired Performance Register
 *		ResourceTemplate(){Register(SystemMemory, 0, 0, 0, 0)},
 *		...
 *	})
 *
 * Each Register() encodes how to access that specific register.
 * e.g. a sample PCC entry has the following encoding:
 *
 *	Register (
 *		PCC,	// AddressSpaceKeyword
 *		8,	// RegisterBitWidth
 *		8,	// RegisterBitOffset
 *		0x30,	// RegisterAddress
 *		9	// AccessSize (subspace ID)
 *	)
 */

/**
 * acpi_cppc_processor_probe - Search for per CPU _CPC objects.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_cppc_processor_probe(struct acpi_processor *pr)
{
	struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object *out_obj, *cpc_obj;
	struct cpc_desc *cpc_ptr;
	struct cpc_reg *gas_t;
	struct device *cpu_dev;
	acpi_handle handle = pr->handle;
	unsigned int num_ent, i, cpc_rev;
	int pcc_subspace_id = -1;
	acpi_status status;
	int ret = -EFAULT;

	/* Parse the ACPI _CPC table for this CPU. */
	status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
					    ACPI_TYPE_PACKAGE);
	if (ACPI_FAILURE(status)) {
		ret = -ENODEV;
		goto out_buf_free;
	}

	out_obj = (union acpi_object *) output.pointer;

	cpc_ptr = kzalloc(sizeof(struct cpc_desc), GFP_KERNEL);
	if (!cpc_ptr) {
		ret = -ENOMEM;
		goto out_buf_free;
	}

	/* First entry is NumEntries. */
	cpc_obj = &out_obj->package.elements[0];
	if (cpc_obj->type == ACPI_TYPE_INTEGER) {
		num_ent = cpc_obj->integer.value;
		if (num_ent <= 1) {
			pr_debug("Unexpected _CPC NumEntries value (%d) for CPU:%d\n",
				 num_ent, pr->id);
			goto out_free;
		}
	} else {
		pr_debug("Unexpected entry type(%d) for NumEntries\n",
			 cpc_obj->type);
		goto out_free;
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) cpc_ptr->num_entries = num_ent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) /* Second entry should be revision. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) cpc_obj = &out_obj->package.elements[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) if (cpc_obj->type == ACPI_TYPE_INTEGER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) cpc_rev = cpc_obj->integer.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) pr_debug("Unexpected entry type(%d) for Revision\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) cpc_obj->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) cpc_ptr->version = cpc_rev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) if (!is_cppc_supported(cpc_rev, num_ent))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) /* Iterate through remaining entries in _CPC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) for (i = 2; i < num_ent; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) cpc_obj = &out_obj->package.elements[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) if (cpc_obj->type == ACPI_TYPE_INTEGER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_INTEGER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) cpc_ptr->cpc_regs[i-2].cpc_entry.int_value = cpc_obj->integer.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) } else if (cpc_obj->type == ACPI_TYPE_BUFFER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) gas_t = (struct cpc_reg *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) cpc_obj->buffer.pointer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) * The PCC Subspace index is encoded inside
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) * the CPC table entries. The same PCC index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) * will be used for all the PCC entries,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) * so extract it only once.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) if (pcc_subspace_id < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) pcc_subspace_id = gas_t->access_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) if (pcc_data_alloc(pcc_subspace_id))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) } else if (pcc_subspace_id != gas_t->access_width) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) pr_debug("Mismatched PCC ids.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) } else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) if (gas_t->address) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) void __iomem *addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) addr = ioremap(gas_t->address, gas_t->bit_width/8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) if (!addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) /* Support only PCC, SYS MEM and FFH type regs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) pr_debug("Unsupported register type: %d\n", gas_t->space_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) memcpy(&cpc_ptr->cpc_regs[i-2].cpc_entry.reg, gas_t, sizeof(*gas_t));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) pr_debug("Error in entry %d in CPC table of CPU %d\n", i, pr->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) per_cpu(cpu_pcc_subspace_idx, pr->id) = pcc_subspace_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) * Initialize the remaining cpc_regs as unsupported.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) * Example: if the firmware exposes CPPC v2, the loop below will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) * initialize the LOWEST_FREQ and NOMINAL_FREQ regs as unsupported.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) */
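^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804)  * Concretely (assuming the v2/v3 constants from cppc_acpi.h): a CPPC v2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804)  * table has num_ent == 21, so the loop below starts at index 19 and marks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804)  * cpc_regs[19] (LOWEST_FREQ) and cpc_regs[20] (NOMINAL_FREQ), the two
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804)  * entries that only exist from v3 (num_ent == 23) onwards.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804)  */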
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) for (i = num_ent - 2; i < MAX_CPC_REG_ENT; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) cpc_ptr->cpc_regs[i].type = ACPI_TYPE_INTEGER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) cpc_ptr->cpc_regs[i].cpc_entry.int_value = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) /* Store CPU Logical ID */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) cpc_ptr->cpu_id = pr->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) /* Parse PSD data for this CPU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) ret = acpi_get_psd(cpc_ptr, handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) /* Register the PCC channel only once per PCC subspace ID. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) if (pcc_subspace_id >= 0 && !pcc_data[pcc_subspace_id]->pcc_channel_acquired) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) ret = register_pcc_channel(pcc_subspace_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) init_rwsem(&pcc_data[pcc_subspace_id]->pcc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) init_waitqueue_head(&pcc_data[pcc_subspace_id]->pcc_write_wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) /* Everything looks okay */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) /* Add a sysfs node for this logical CPU so its feedback counters can be read. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) cpu_dev = get_cpu_device(pr->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) if (!cpu_dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) /* Plug PSD data into this CPU's CPC descriptor. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) "acpi_cppc");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) per_cpu(cpc_desc_ptr, pr->id) = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) kobject_put(&cpc_ptr->kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) kfree(output.pointer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) /* Free all the mapped sys mem areas for this CPU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) for (i = 2; i < cpc_ptr->num_entries; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) void __iomem *addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) if (addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) iounmap(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) kfree(cpc_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) out_buf_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) kfree(output.pointer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe);
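^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) * For reference, an illustrative sketch of the _CPC package layout that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) * the probe above consumes (mirroring the enum cppc_regs ordering in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) * cppc_acpi.h): element [0] is NumEntries, element [1] is Revision, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) * each later element is either an Integer or a Buffer describing one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) * register, so package element [i] lands in cpc_ptr->cpc_regs[i - 2]:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) *	elements[0]  -> num_entries	(Integer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) *	elements[1]  -> revision	(Integer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) *	elements[2]  -> cpc_regs[HIGHEST_PERF]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) *	elements[3]  -> cpc_regs[NOMINAL_PERF]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) *	...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) *	elements[21] -> cpc_regs[LOWEST_FREQ]	(v3+ only)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) *	elements[22] -> cpc_regs[NOMINAL_FREQ]	(v3+ only)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) */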
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) * acpi_cppc_processor_exit - Cleanup CPC structs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) * @pr: Ptr to acpi_processor containing this CPU's logical ID.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) * Return: Void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) void acpi_cppc_processor_exit(struct acpi_processor *pr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) struct cpc_desc *cpc_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) void __iomem *addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, pr->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) if (pcc_ss_id >= 0 && pcc_data[pcc_ss_id]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) if (pcc_data[pcc_ss_id]->pcc_channel_acquired) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) pcc_data[pcc_ss_id]->refcount--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) if (!pcc_data[pcc_ss_id]->refcount) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) pcc_mbox_free_channel(pcc_data[pcc_ss_id]->pcc_channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) kfree(pcc_data[pcc_ss_id]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) pcc_data[pcc_ss_id] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) if (!cpc_ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) /* Free all the mapped sys mem areas for this CPU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) for (i = 2; i < cpc_ptr->num_entries; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) if (addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) iounmap(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) kobject_put(&cpc_ptr->kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) kfree(cpc_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);
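^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) * Usage sketch (illustrative only): a hypothetical consumer pairing the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) * probe/exit entry points above; example_attach()/example_detach() are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) * assumptions, not kernel API.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) *	static int example_attach(struct acpi_processor *pr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) *	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) *		return acpi_cppc_processor_probe(pr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) *	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) *	static void example_detach(struct acpi_processor *pr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) *	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) *		acpi_cppc_processor_exit(pr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) *	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) * The exit path unmaps the system memory registers, drops the sysfs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) * kobject and frees the CPC descriptor, so it must only run after a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) * successful probe.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) */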
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) * cpc_read_ffh() - Read FFH register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) * @cpunum: CPU number to read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) * @reg: cppc register information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) * @val: place holder for return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) * Read bit_width bits from a specified address and bit_offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) * Return: 0 on success, a negative error code otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) int __weak cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) return -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) * cpc_write_ffh() - Write FFH register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) * @cpunum: CPU number to write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) * @reg: cppc register information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) * @val: value to write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) * Write value of bit_width bits to a specified address and bit_offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) * Return: 0 on success, a negative error code otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) return -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) }
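^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) * Both stubs above are __weak so an architecture can override them. A
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) * hedged sketch of such an override, assuming an x86-like platform where
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) * the FFH register's address field encodes an MSR number (the real
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) * encoding is vendor-specific):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) *	int cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) *	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) *		return rdmsrl_safe_on_cpu(cpunum, (u32)reg->address, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) *	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) * rdmsrl_safe_on_cpu() performs the read on the target CPU, which matters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) * because FFH registers are typically per-CPU resources.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) */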
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) * Since cpc_read() and cpc_write() are called while holding pcc_lock, they
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) * should be as fast as possible. We have already mapped the PCC subspace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) * during init, so we can directly write to it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) int ret_val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) void __iomem *vaddr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) struct cpc_reg *reg = &reg_res->cpc_entry.reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) if (reg_res->type == ACPI_TYPE_INTEGER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) *val = reg_res->cpc_entry.int_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) return ret_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) *val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) vaddr = reg_res->sys_mem_vaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) return cpc_read_ffh(cpu, reg, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) return acpi_os_read_memory((acpi_physical_address)reg->address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) val, reg->bit_width);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) switch (reg->bit_width) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) case 8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) *val = readb_relaxed(vaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) case 16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) *val = readw_relaxed(vaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) case 32:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) *val = readl_relaxed(vaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) case 64:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) *val = readq_relaxed(vaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) reg->bit_width, pcc_ss_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) ret_val = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) return ret_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) int ret_val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) void __iomem *vaddr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) struct cpc_reg *reg = &reg_res->cpc_entry.reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) vaddr = reg_res->sys_mem_vaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) return cpc_write_ffh(cpu, reg, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) return acpi_os_write_memory((acpi_physical_address)reg->address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) val, reg->bit_width);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) switch (reg->bit_width) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) case 8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) writeb_relaxed(val, vaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) case 16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) writew_relaxed(val, vaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) case 32:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) writel_relaxed(val, vaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) case 64:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) writeq_relaxed(val, vaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) reg->bit_width, pcc_ss_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) ret_val = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) return ret_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) * cppc_get_desired_perf - Get the value of desired performance register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) * @cpunum: CPU from which to get desired performance.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) * @desired_perf: address of a variable to store the returned desired performance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) * Return: 0 for success, -EIO otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) struct cpc_register_resource *desired_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) struct cppc_pcc_data *pcc_ss_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) if (!cpc_desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) if (CPC_IN_PCC(desired_reg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) if (pcc_ss_id < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) pcc_ss_data = pcc_data[pcc_ss_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) down_write(&pcc_ss_data->pcc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) cpc_read(cpunum, desired_reg, desired_perf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) up_write(&pcc_ss_data->pcc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) cpc_read(cpunum, desired_reg, desired_perf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) EXPORT_SYMBOL_GPL(cppc_get_desired_perf);
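^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) * Usage sketch (illustrative): sampling the most recently requested
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) * performance level for a CPU; the caller below is an assumption.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) *	u64 desired;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) *	if (!cppc_get_desired_perf(cpu, &desired))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) *		pr_info("CPU%d desired perf: %llu\n", cpu, desired);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) */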
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) * cppc_get_perf_caps - Get a CPU's performance capabilities.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) * @cpunum: CPU from which to get capabilities info.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) * Return: 0 for success with perf_caps populated else -ERRNO.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) struct cpc_register_resource *highest_reg, *lowest_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) *lowest_non_linear_reg, *nominal_reg, *guaranteed_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) *low_freq_reg = NULL, *nom_freq_reg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) u64 high, low, guaranteed, nom, min_nonlinear, low_f = 0, nom_f = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) struct cppc_pcc_data *pcc_ss_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) int ret = 0, regs_in_pcc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) if (!cpc_desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) lowest_non_linear_reg = &cpc_desc->cpc_regs[LOW_NON_LINEAR_PERF];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) nominal_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) low_freq_reg = &cpc_desc->cpc_regs[LOWEST_FREQ];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) nom_freq_reg = &cpc_desc->cpc_regs[NOMINAL_FREQ];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) guaranteed_reg = &cpc_desc->cpc_regs[GUARANTEED_PERF];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) /* Are any of the regs PCC? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) CPC_IN_PCC(lowest_non_linear_reg) || CPC_IN_PCC(nominal_reg) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) CPC_IN_PCC(low_freq_reg) || CPC_IN_PCC(nom_freq_reg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) if (pcc_ss_id < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) pr_debug("Invalid pcc_ss_id\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) pcc_ss_data = pcc_data[pcc_ss_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) regs_in_pcc = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) down_write(&pcc_ss_data->pcc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) /* Ring doorbell once to update PCC subspace */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) cpc_read(cpunum, highest_reg, &high);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) perf_caps->highest_perf = high;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) cpc_read(cpunum, lowest_reg, &low);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) perf_caps->lowest_perf = low;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) cpc_read(cpunum, nominal_reg, &nom);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) perf_caps->nominal_perf = nom;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) if (guaranteed_reg->type != ACPI_TYPE_BUFFER ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) IS_NULL_REG(&guaranteed_reg->cpc_entry.reg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) perf_caps->guaranteed_perf = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) cpc_read(cpunum, guaranteed_reg, &guaranteed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) perf_caps->guaranteed_perf = guaranteed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) perf_caps->lowest_nonlinear_perf = min_nonlinear;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) if (!high || !low || !nom || !min_nonlinear)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) ret = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) /* Read optional lowest and nominal frequencies if present */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) if (CPC_SUPPORTED(low_freq_reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) cpc_read(cpunum, low_freq_reg, &low_f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) if (CPC_SUPPORTED(nom_freq_reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) cpc_read(cpunum, nom_freq_reg, &nom_f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) perf_caps->lowest_freq = low_f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) perf_caps->nominal_freq = nom_f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) out_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) if (regs_in_pcc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) up_write(&pcc_ss_data->pcc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) EXPORT_SYMBOL_GPL(cppc_get_perf_caps);
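^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) * Usage sketch (illustrative): a hypothetical consumer dumping the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) * capability ladder. The fields are real members of struct cppc_perf_caps
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) * in cppc_acpi.h; the caller itself is an assumption.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) *	struct cppc_perf_caps caps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) *	if (!cppc_get_perf_caps(cpu, &caps))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) *		pr_info("lowest %u nonlinear %u nominal %u highest %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) *			caps.lowest_perf, caps.lowest_nonlinear_perf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) *			caps.nominal_perf, caps.highest_perf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) */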
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) * cppc_get_perf_ctrs - Read a CPU's performance feedback counters.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) * @cpunum: CPU from which to read counters.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) * @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) * Return: 0 for success with perf_fb_ctrs populated else -ERRNO.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) struct cpc_register_resource *delivered_reg, *reference_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) *ref_perf_reg, *ctr_wrap_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) struct cppc_pcc_data *pcc_ss_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) u64 delivered, reference, ref_perf, ctr_wrap_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) int ret = 0, regs_in_pcc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) if (!cpc_desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) ctr_wrap_reg = &cpc_desc->cpc_regs[CTR_WRAP_TIME];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) * If the reference perf register is not supported, then we should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) * use the nominal perf value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) if (!CPC_SUPPORTED(ref_perf_reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) /* Are any of the regs PCC? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) CPC_IN_PCC(ctr_wrap_reg) || CPC_IN_PCC(ref_perf_reg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) if (pcc_ss_id < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) pr_debug("Invalid pcc_ss_id\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) pcc_ss_data = pcc_data[pcc_ss_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) down_write(&pcc_ss_data->pcc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) regs_in_pcc = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) /* Ring doorbell once to update PCC subspace */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) cpc_read(cpunum, delivered_reg, &delivered);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) cpc_read(cpunum, reference_reg, &reference);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) cpc_read(cpunum, ref_perf_reg, &ref_perf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) * Per the spec, if the optional ctr_wrap_time register is unsupported,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) * then the performance counters are assumed to never wrap during the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) * lifetime of the platform.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) ctr_wrap_time = (u64)(~((u64)0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) if (CPC_SUPPORTED(ctr_wrap_reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) cpc_read(cpunum, ctr_wrap_reg, &ctr_wrap_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) if (!delivered || !reference || !ref_perf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) ret = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) perf_fb_ctrs->delivered = delivered;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) perf_fb_ctrs->reference = reference;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) perf_fb_ctrs->reference_perf = ref_perf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) perf_fb_ctrs->wraparound_time = ctr_wrap_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) out_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) if (regs_in_pcc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) up_write(&pcc_ss_data->pcc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);
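^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) * Worked example (a sketch, not kernel API): consumers typically take two
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) * snapshots of these counters and derive the average delivered performance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) * over the interval by scaling reference performance with the counter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) * deltas:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) *	static u64 example_delivered_perf(struct cppc_perf_fb_ctrs *t0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) *					  struct cppc_perf_fb_ctrs *t1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) *	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) *		u64 d_delivered = t1->delivered - t0->delivered;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) *		u64 d_reference = t1->reference - t0->reference;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) *		if (!d_reference)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) *			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) *		return div64_u64(t0->reference_perf * d_delivered,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) *				 d_reference);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) *	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) * For instance, reference_perf = 100, d_delivered = 150000 and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) * d_reference = 100000 yield a delivered performance of 150.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) */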
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) * cppc_set_perf - Set a CPU's performance controls.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) * @cpu: CPU for which to set performance controls.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) * @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) * Return: 0 for success, -ERRNO otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) struct cpc_register_resource *desired_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) struct cppc_pcc_data *pcc_ss_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) if (!cpc_desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) pr_debug("No CPC descriptor for CPU:%d\n", cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) * This is Phase-I where we want to write to CPC registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) * -> We want all CPUs to be able to execute this phase in parallel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) * Since the read_lock can be acquired by multiple CPUs simultaneously,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) * we achieve that goal here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) if (CPC_IN_PCC(desired_reg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) if (pcc_ss_id < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) pr_debug("Invalid pcc_ss_id\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) pcc_ss_data = pcc_data[pcc_ss_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) down_read(&pcc_ss_data->pcc_lock); /* BEGIN Phase-I */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) if (pcc_ss_data->platform_owns_pcc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) ret = check_pcc_chan(pcc_ss_id, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) up_read(&pcc_ss_data->pcc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) * Update the pending_write to make sure a PCC CMD_READ will not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) * arrive and steal the channel during the switch to write lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) pcc_ss_data->pending_pcc_write_cmd = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) cpc_desc->write_cmd_id = pcc_ss_data->pcc_write_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) cpc_desc->write_cmd_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) * Skip writing MIN/MAX until Linux knows how to come up with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) * useful values.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) cpc_write(cpu, desired_reg, perf_ctrls->desired_perf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) if (CPC_IN_PCC(desired_reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) up_read(&pcc_ss_data->pcc_lock); /* END Phase-I */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) * This is Phase-II where we transfer the ownership of PCC to Platform
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) * Short Summary: if we think of a group of cppc_set_perf requests that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) * happened in a short overlapping interval, the last CPU to come out of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) * Phase-I will enter Phase-II and ring the doorbell.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) * We have the following requirements for Phase-II:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) * 1. We want to execute Phase-II only when there are no CPUs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) * currently executing in Phase-I.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) * 2. Once we start Phase-II we want to prevent all other CPUs from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) * entering Phase-I.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) * 3. We want only one CPU among all those who went through Phase-I
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) * to run Phase-II.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) * If write_trylock fails to get the lock and doesn't transfer the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) * PCC ownership to the platform, then one of the following will be TRUE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) * 1. There is at least one CPU in Phase-I which will later execute
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) * write_trylock, so the CPUs in Phase-I will be responsible for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) * executing the Phase-II.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) * 2. Some other CPU has beaten this CPU to successfully execute the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) * write_trylock and has already acquired the write_lock. We know for a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) * fact it (the other CPU acquiring the write_lock) couldn't have happened
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) * before this CPU's Phase-I as we held the read_lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) * 3. Some other CPU executing a PCC CMD_READ has stolen the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) * down_write, in which case send_pcc_cmd will check for pending
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) * CMD_WRITE commands by checking pending_pcc_write_cmd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) * So in all cases, this CPU knows that its request will be delivered
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) * by another CPU and can return.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) * After getting the down_write we still need to check for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) * pending_pcc_write_cmd to take care of the following scenario: the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) * thread running this code could be scheduled out between
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) * Phase-I and Phase-II. Before it is scheduled back in, another CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) * could have delivered the request to the platform by triggering the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) * doorbell and transferred the ownership of PCC to the platform. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) * check avoids triggering an unnecessary doorbell and, more importantly,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) * before triggering the doorbell it makes sure that the PCC channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) * ownership is still with OSPM.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) * pending_pcc_write_cmd can also be cleared by a different CPU if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) * there was a PCC CMD_READ waiting on down_write and it stole the lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) * before the PCC CMD_WRITE was completed. send_pcc_cmd checks for this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) * case during a CMD_READ and, if there are pending writes, it delivers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) * the write command before servicing the read command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) if (CPC_IN_PCC(desired_reg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) if (down_write_trylock(&pcc_ss_data->pcc_lock)) { /* BEGIN Phase-II */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) /* Update only if there are pending write commands */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) if (pcc_ss_data->pending_pcc_write_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) send_pcc_cmd(pcc_ss_id, CMD_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) up_write(&pcc_ss_data->pcc_lock); /* END Phase-II */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) /* Wait until pcc_write_cnt is updated by send_pcc_cmd */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) wait_event(pcc_ss_data->pcc_write_wait_q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) cpc_desc->write_cmd_id != pcc_ss_data->pcc_write_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) /* send_pcc_cmd updates the status in case of failure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) ret = cpc_desc->write_cmd_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) EXPORT_SYMBOL_GPL(cppc_set_perf);
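^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) * Usage sketch (illustrative): requesting nominal performance for a CPU,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) * assuming caps was filled in by cppc_get_perf_caps(). Only desired_perf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) * is populated; min_perf/max_perf are deliberately left at zero, per the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) * "Skip writing MIN/MAX" note above.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) *	struct cppc_perf_ctrls ctrls = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) *		.desired_perf = caps.nominal_perf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) *	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) *	ret = cppc_set_perf(cpu, &ctrls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) */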
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) * cppc_get_transition_latency - returns frequency transition latency in ns
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) * @cpu_num: CPU number for which the transition latency is requested.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) * ACPI CPPC does not explicitly specify how a platform can communicate the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) * transition latency for performance change requests. The closest we have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) * is the timing information from the PCCT tables which provides the info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) * on the number and frequency of PCC commands the platform can handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) unsigned int cppc_get_transition_latency(int cpu_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) * Expected transition latency is based on the PCCT timing values
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) * Below are the definitions from the ACPI spec:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) * pcc_nominal - Expected latency to process a command, in microseconds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) * pcc_mpar - The maximum number of periodic requests that the subspace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) * channel can support, reported in commands per minute. 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) * indicates no limitation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) * pcc_mrtt - The minimum amount of time that OSPM must wait after the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) * completion of a command before issuing the next command,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) * in microseconds.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) */
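^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) * Worked example (assumed numbers): pcc_mpar == 600 commands/min gives a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) * floor of 60 * (10^9 / 600) ~= 100,000,000 ns, i.e. at most one command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) * every 100 ms, which would dominate a pcc_nominal of 1000 us
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) * (1,000,000 ns) or a pcc_mrtt of 500 us (500,000 ns).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) */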
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) unsigned int latency_ns = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) struct cpc_desc *cpc_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) struct cpc_register_resource *desired_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) struct cppc_pcc_data *pcc_ss_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) cpc_desc = per_cpu(cpc_desc_ptr, cpu_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) if (!cpc_desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) return CPUFREQ_ETERNAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) if (!CPC_IN_PCC(desired_reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) return CPUFREQ_ETERNAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) if (pcc_ss_id < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) return CPUFREQ_ETERNAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) pcc_ss_data = pcc_data[pcc_ss_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) if (pcc_ss_data->pcc_mpar)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) latency_ns = 60 * (1000 * 1000 * 1000 / pcc_ss_data->pcc_mpar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) latency_ns = max(latency_ns, pcc_ss_data->pcc_nominal * 1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) latency_ns = max(latency_ns, pcc_ss_data->pcc_mrtt * 1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) return latency_ns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) EXPORT_SYMBOL_GPL(cppc_get_transition_latency);
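^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) * Usage sketch (illustrative): a hypothetical cpufreq driver seeding its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) * policy with the PCCT-derived latency; the init callback is an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) * assumption, while cpuinfo.transition_latency is a real cpufreq field.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) *	static int example_cpufreq_init(struct cpufreq_policy *policy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) *	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) *		policy->cpuinfo.transition_latency =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) *			cppc_get_transition_latency(policy->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) *		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) *	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) */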