Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 */

#define pr_fmt(fmt) "%s " fmt, KBUILD_MODNAME

#include <linux/atomic.h>
#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

#include <soc/qcom/cmd-db.h>
#include <soc/qcom/tcs.h>
#include <dt-bindings/soc/qcom,rpmh-rsc.h>

#include "rpmh-internal.h"

#define CREATE_TRACE_POINTS
#include "trace-rpmh.h"

#define RSC_DRV_TCS_OFFSET		672
#define RSC_DRV_CMD_OFFSET		20

/* DRV HW Solver Configuration Information Register */
#define DRV_SOLVER_CONFIG		0x04
#define DRV_HW_SOLVER_MASK		1
#define DRV_HW_SOLVER_SHIFT		24

/* DRV TCS Configuration Information Register */
#define DRV_PRNT_CHLD_CONFIG		0x0C
#define DRV_NUM_TCS_MASK		0x3F
#define DRV_NUM_TCS_SHIFT		6
#define DRV_NCPT_MASK			0x1F
#define DRV_NCPT_SHIFT			27
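
/*
 * Illustrative note (added by the editor, not in the original file): the
 * probe code, which lies outside this excerpt, derives the TCS geometry
 * used below from DRV_PRNT_CHLD_CONFIG roughly as
 *
 *	ncpt = (config >> DRV_NCPT_SHIFT) & DRV_NCPT_MASK;
 *
 * with the per-DRV TCS count extracted analogously via DRV_NUM_TCS_MASK
 * and DRV_NUM_TCS_SHIFT (scaled by the DRV id).
 */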

/* Offsets for common TCS Registers, one bit per TCS */
#define RSC_DRV_IRQ_ENABLE		0x00
#define RSC_DRV_IRQ_STATUS		0x04
#define RSC_DRV_IRQ_CLEAR		0x08	/* w/o; write 1 to clear */

/*
 * Offsets for per TCS Registers.
 *
 * TCSes start at 0x10 from tcs_base and are stored one after another.
 * Multiply tcs_id by RSC_DRV_TCS_OFFSET to find a given TCS and add one
 * of the below to find a register.
 */
#define RSC_DRV_CMD_WAIT_FOR_CMPL	0x10	/* 1 bit per command */
#define RSC_DRV_CONTROL			0x14
#define RSC_DRV_STATUS			0x18	/* zero if tcs is busy */
#define RSC_DRV_CMD_ENABLE		0x1C	/* 1 bit per command */

/*
 * Offsets for per command in a TCS.
 *
 * Commands (up to 16) start at 0x30 in a TCS; multiply command index
 * by RSC_DRV_CMD_OFFSET and add one of the below to find a register.
 */
#define RSC_DRV_CMD_MSGID		0x30
#define RSC_DRV_CMD_ADDR		0x34
#define RSC_DRV_CMD_DATA		0x38
#define RSC_DRV_CMD_STATUS		0x3C
#define RSC_DRV_CMD_RESP_DATA		0x40

#define TCS_AMC_MODE_ENABLE		BIT(16)
#define TCS_AMC_MODE_TRIGGER		BIT(24)

/* TCS CMD register bit mask */
#define CMD_MSGID_LEN			8
#define CMD_MSGID_RESP_REQ		BIT(8)
#define CMD_MSGID_WRITE			BIT(16)
#define CMD_STATUS_ISSUED		BIT(8)
#define CMD_STATUS_COMPL		BIT(16)

/*
 * Here's a high level overview of how all the registers in RPMH work
 * together:
 *
 * - The main rpmh-rsc address is the base of a register space that can
 *   be used to find overall configuration of the hardware
 *   (DRV_PRNT_CHLD_CONFIG). Also found within the rpmh-rsc register
 *   space are all the TCS blocks. The offset of the TCS blocks is
 *   specified in the device tree by "qcom,tcs-offset" and used to
 *   compute tcs_base.
 * - TCS blocks come one after another. Type, count, and order are
 *   specified by the device tree as "qcom,tcs-config".
 * - Each TCS block has some registers, then space for up to 16 commands.
 *   Note that though address space is reserved for 16 commands, fewer
 *   might be present. See ncpt (num cmds per TCS).
 *
 * Here's a picture:
 *
 *  +---------------------------------------------------+
 *  |RSC                                                |
 *  | ctrl                                              |
 *  |                                                   |
 *  | Drvs:                                             |
 *  | +-----------------------------------------------+ |
 *  | |DRV0                                           | |
 *  | | ctrl/config                                   | |
 *  | | IRQ                                           | |
 *  | |                                               | |
 *  | | TCSes:                                        | |
 *  | | +------------------------------------------+  | |
 *  | | |TCS0  |  |  |  |  |  |  |  |  |  |  |  |  |  | |
 *  | | | ctrl | 0| 1| 2| 3| 4| 5| .| .| .| .|14|15|  | |
 *  | | |      |  |  |  |  |  |  |  |  |  |  |  |  |  | |
 *  | | +------------------------------------------+  | |
 *  | | +------------------------------------------+  | |
 *  | | |TCS1  |  |  |  |  |  |  |  |  |  |  |  |  |  | |
 *  | | | ctrl | 0| 1| 2| 3| 4| 5| .| .| .| .|14|15|  | |
 *  | | |      |  |  |  |  |  |  |  |  |  |  |  |  |  | |
 *  | | +------------------------------------------+  | |
 *  | | +------------------------------------------+  | |
 *  | | |TCS2  |  |  |  |  |  |  |  |  |  |  |  |  |  | |
 *  | | | ctrl | 0| 1| 2| 3| 4| 5| .| .| .| .|14|15|  | |
 *  | | |      |  |  |  |  |  |  |  |  |  |  |  |  |  | |
 *  | | +------------------------------------------+  | |
 *  | |                    ......                     | |
 *  | +-----------------------------------------------+ |
 *  | +-----------------------------------------------+ |
 *  | |DRV1                                           | |
 *  | | (same as DRV0)                                | |
 *  | +-----------------------------------------------+ |
 *  |                      ......                       |
 *  +---------------------------------------------------+
 */

static inline void __iomem *
tcs_reg_addr(const struct rsc_drv *drv, int reg, int tcs_id)
{
	return drv->tcs_base + RSC_DRV_TCS_OFFSET * tcs_id + reg;
}

static inline void __iomem *
tcs_cmd_addr(const struct rsc_drv *drv, int reg, int tcs_id, int cmd_id)
{
	return tcs_reg_addr(drv, reg, tcs_id) + RSC_DRV_CMD_OFFSET * cmd_id;
}
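
/*
 * Illustrative note (added by the editor): with RSC_DRV_TCS_OFFSET = 672
 * and RSC_DRV_CMD_OFFSET = 20 above, the CMD_ADDR register of, say,
 * command 2 in TCS 1 is located at
 *
 *	tcs_cmd_addr(drv, RSC_DRV_CMD_ADDR, 1, 2)
 *		== drv->tcs_base + 672 * 1 + 0x34 + 20 * 2
 */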

static u32 read_tcs_cmd(const struct rsc_drv *drv, int reg, int tcs_id,
			int cmd_id)
{
	return readl_relaxed(tcs_cmd_addr(drv, reg, tcs_id, cmd_id));
}

static u32 read_tcs_reg(const struct rsc_drv *drv, int reg, int tcs_id)
{
	return readl_relaxed(tcs_reg_addr(drv, reg, tcs_id));
}

static void write_tcs_cmd(const struct rsc_drv *drv, int reg, int tcs_id,
			  int cmd_id, u32 data)
{
	writel_relaxed(data, tcs_cmd_addr(drv, reg, tcs_id, cmd_id));
}

static void write_tcs_reg(const struct rsc_drv *drv, int reg, int tcs_id,
			  u32 data)
{
	writel_relaxed(data, tcs_reg_addr(drv, reg, tcs_id));
}

static void write_tcs_reg_sync(const struct rsc_drv *drv, int reg, int tcs_id,
			       u32 data)
{
	int i;

	writel(data, tcs_reg_addr(drv, reg, tcs_id));

	/*
	 * Wait until we read back the same value.  Use a counter rather than
	 * ktime for timeout since this may be called after timekeeping stops.
	 */
	for (i = 0; i < USEC_PER_SEC; i++) {
		if (readl(tcs_reg_addr(drv, reg, tcs_id)) == data)
			return;
		udelay(1);
	}
	pr_err("%s: error writing %#x to %d:%#x\n", drv->name,
	       data, tcs_id, reg);
}

/**
 * tcs_is_free() - Return if a TCS is totally free.
 * @drv:    The RSC controller.
 * @tcs_id: The global ID of this TCS.
 *
 * Returns true if nobody has claimed this TCS (by setting tcs_in_use).
 *
 * Context: Must be called with the drv->lock held.
 *
 * Return: true if the given TCS is free.
 */
static bool tcs_is_free(struct rsc_drv *drv, int tcs_id)
{
	return !test_bit(tcs_id, drv->tcs_in_use);
}

/**
 * tcs_invalidate() - Invalidate all TCSes of the given type (sleep or wake).
 * @drv:  The RSC controller.
 * @type: SLEEP_TCS or WAKE_TCS
 *
 * This will clear the "slots" variable of the given tcs_group and also
 * tell the hardware to forget about all entries.
 *
 * The caller must ensure that no other RPMH actions are happening when this
 * function is called, since otherwise the device may immediately become
 * used again even before this function exits.
 */
static void tcs_invalidate(struct rsc_drv *drv, int type)
{
	int m;
	struct tcs_group *tcs = &drv->tcs[type];

	/* Caller ensures nobody else is running so no lock */
	if (bitmap_empty(tcs->slots, MAX_TCS_SLOTS))
		return;

	for (m = tcs->offset; m < tcs->offset + tcs->num_tcs; m++) {
		write_tcs_reg_sync(drv, RSC_DRV_CMD_ENABLE, m, 0);
		write_tcs_reg_sync(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, m, 0);
	}
	bitmap_zero(tcs->slots, MAX_TCS_SLOTS);
}

/**
 * rpmh_rsc_invalidate() - Invalidate sleep and wake TCSes.
 * @drv: The RSC controller.
 *
 * The caller must ensure that no other RPMH actions are happening when this
 * function is called, since otherwise the device may immediately become
 * used again even before this function exits.
 */
void rpmh_rsc_invalidate(struct rsc_drv *drv)
{
	tcs_invalidate(drv, SLEEP_TCS);
	tcs_invalidate(drv, WAKE_TCS);
}

/**
 * get_tcs_for_msg() - Get the tcs_group used to send the given message.
 * @drv: The RSC controller.
 * @msg: The message we want to send.
 *
 * This is normally pretty straightforward except if we are trying to send
 * an ACTIVE_ONLY message but don't have any active_only TCSes.
 *
 * Return: A pointer to a tcs_group or an ERR_PTR.
 */
static struct tcs_group *get_tcs_for_msg(struct rsc_drv *drv,
					 const struct tcs_request *msg)
{
	int type;
	struct tcs_group *tcs;

	switch (msg->state) {
	case RPMH_ACTIVE_ONLY_STATE:
		type = ACTIVE_TCS;
		break;
	case RPMH_WAKE_ONLY_STATE:
		type = WAKE_TCS;
		break;
	case RPMH_SLEEP_STATE:
		type = SLEEP_TCS;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	/*
	 * If we are making an active request on a RSC that does not have a
	 * dedicated TCS for active state use, then re-purpose a wake TCS to
	 * send active votes. This is safe because we ensure any active-only
	 * transfers have finished before we use it (maybe by running from
	 * the last CPU in PM code).
	 */
	tcs = &drv->tcs[type];
	if (msg->state == RPMH_ACTIVE_ONLY_STATE && !tcs->num_tcs)
		tcs = &drv->tcs[WAKE_TCS];

	return tcs;
}

/**
 * get_req_from_tcs() - Get a stashed request that was xfering on the given TCS.
 * @drv:    The RSC controller.
 * @tcs_id: The global ID of this TCS.
 *
 * For ACTIVE_ONLY transfers we want to call back into the client when the
 * transfer finishes. To do this we need the "request" that the client
 * originally provided us. This function grabs the request that we stashed
 * when we started the transfer.
 *
 * This only makes sense for ACTIVE_ONLY transfers since those are the only
 * ones we track sending (the only ones we enable interrupts for and the only
 * ones we call back to the client for).
 *
 * Return: The stashed request.
 */
static const struct tcs_request *get_req_from_tcs(struct rsc_drv *drv,
						  int tcs_id)
{
	struct tcs_group *tcs;
	int i;

	for (i = 0; i < TCS_TYPE_NR; i++) {
		tcs = &drv->tcs[i];
		if (tcs->mask & BIT(tcs_id))
			return tcs->req[tcs_id - tcs->offset];
	}

	return NULL;
}

/**
 * __tcs_set_trigger() - Start xfer on a TCS or unset trigger on a borrowed TCS
 * @drv:     The controller.
 * @tcs_id:  The global ID of this TCS.
 * @trigger: If true then untrigger/retrigger. If false then just untrigger.
 *
 * In the normal case we only ever call with "trigger=true" to start a
 * transfer. That will un-trigger/disable the TCS from the last transfer
 * then trigger/enable for this transfer.
 *
 * If we borrowed a wake TCS for an active-only transfer we'll also call
 * this function with "trigger=false" to just do the un-trigger/disable
 * before using the TCS for wake purposes again.
 *
 * Note that the AP is only in charge of triggering active-only transfers.
 * The AP never triggers sleep/wake values using this function.
 */
static void __tcs_set_trigger(struct rsc_drv *drv, int tcs_id, bool trigger)
{
	u32 enable;

	/*
	 * HW req: Clear the DRV_CONTROL and enable TCS again
	 * While clearing ensure that the AMC mode trigger is cleared
	 * and then the mode enable is cleared.
	 */
	enable = read_tcs_reg(drv, RSC_DRV_CONTROL, tcs_id);
	enable &= ~TCS_AMC_MODE_TRIGGER;
	write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
	enable &= ~TCS_AMC_MODE_ENABLE;
	write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);

	if (trigger) {
		/* Enable the AMC mode on the TCS and then trigger the TCS */
		enable = TCS_AMC_MODE_ENABLE;
		write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
		enable |= TCS_AMC_MODE_TRIGGER;
		write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
	}
}

/**
 * enable_tcs_irq() - Enable or disable interrupts on the given TCS.
 * @drv:     The controller.
 * @tcs_id:  The global ID of this TCS.
 * @enable:  If true then enable; if false then disable
 *
 * We only ever call this when we borrow a wake TCS for an active-only
 * transfer. For active-only TCSes interrupts are always left enabled.
 */
static void enable_tcs_irq(struct rsc_drv *drv, int tcs_id, bool enable)
{
	u32 data;

	data = readl_relaxed(drv->tcs_base + RSC_DRV_IRQ_ENABLE);
	if (enable)
		data |= BIT(tcs_id);
	else
		data &= ~BIT(tcs_id);
	writel_relaxed(data, drv->tcs_base + RSC_DRV_IRQ_ENABLE);
}

/**
 * tcs_tx_done() - TX Done interrupt handler.
 * @irq: The IRQ number (ignored).
 * @p:   Pointer to "struct rsc_drv".
 *
 * Called for ACTIVE_ONLY transfers (those are the only ones we enable the
 * IRQ for) when a transfer is done.
 *
 * Return: IRQ_HANDLED
 */
static irqreturn_t tcs_tx_done(int irq, void *p)
{
	struct rsc_drv *drv = p;
	int i, j, err = 0;
	unsigned long irq_status;
	const struct tcs_request *req;
	struct tcs_cmd *cmd;

	irq_status = readl_relaxed(drv->tcs_base + RSC_DRV_IRQ_STATUS);

	for_each_set_bit(i, &irq_status, BITS_PER_LONG) {
		req = get_req_from_tcs(drv, i);
		if (!req) {
			WARN_ON(1);
			goto skip;
		}

		err = 0;
		for (j = 0; j < req->num_cmds; j++) {
			u32 sts;

			cmd = &req->cmds[j];
			sts = read_tcs_cmd(drv, RSC_DRV_CMD_STATUS, i, j);
			if (!(sts & CMD_STATUS_ISSUED) ||
			   ((req->wait_for_compl || cmd->wait) &&
			   !(sts & CMD_STATUS_COMPL))) {
				pr_err("Incomplete request: %s: addr=%#x data=%#x",
				       drv->name, cmd->addr, cmd->data);
				err = -EIO;
			}
		}

		trace_rpmh_tx_done(drv, i, req, err);

		/*
		 * If wake tcs was re-purposed for sending active
		 * votes, clear AMC trigger & enable modes and
		 * disable interrupt for this TCS
		 */
		if (!drv->tcs[ACTIVE_TCS].num_tcs)
			__tcs_set_trigger(drv, i, false);
skip:
		/* Reclaim the TCS */
		write_tcs_reg(drv, RSC_DRV_CMD_ENABLE, i, 0);
		write_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, i, 0);
		writel_relaxed(BIT(i), drv->tcs_base + RSC_DRV_IRQ_CLEAR);
		spin_lock(&drv->lock);
		clear_bit(i, drv->tcs_in_use);
		/*
		 * Disable interrupt for WAKE TCS to avoid being
		 * spammed with interrupts coming when the solver
		 * sends its wake votes.
		 */
		if (!drv->tcs[ACTIVE_TCS].num_tcs)
			enable_tcs_irq(drv, i, false);
		spin_unlock(&drv->lock);
		wake_up(&drv->tcs_wait);
		if (req)
			rpmh_tx_done(req, err);
	}

	return IRQ_HANDLED;
}

/**
 * __tcs_buffer_write() - Write to TCS hardware from a request; don't trigger.
 * @drv:    The controller.
 * @tcs_id: The global ID of this TCS.
 * @cmd_id: The index within the TCS to start writing.
 * @msg:    The message we want to send, which will contain several addr/data
 *          pairs to program (but few enough that they all fit in one TCS).
 *
 * This is used for all types of transfers (active, sleep, and wake).
 */
static void __tcs_buffer_write(struct rsc_drv *drv, int tcs_id, int cmd_id,
			       const struct tcs_request *msg)
{
	u32 msgid, cmd_msgid;
	u32 cmd_enable = 0;
	u32 cmd_complete;
	struct tcs_cmd *cmd;
	int i, j;

	cmd_msgid = CMD_MSGID_LEN;
	cmd_msgid |= msg->wait_for_compl ? CMD_MSGID_RESP_REQ : 0;
	cmd_msgid |= CMD_MSGID_WRITE;

	cmd_complete = read_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id);

	for (i = 0, j = cmd_id; i < msg->num_cmds; i++, j++) {
		cmd = &msg->cmds[i];
		cmd_enable |= BIT(j);
		cmd_complete |= cmd->wait << j;
		msgid = cmd_msgid;
		msgid |= cmd->wait ? CMD_MSGID_RESP_REQ : 0;

		write_tcs_cmd(drv, RSC_DRV_CMD_MSGID, tcs_id, j, msgid);
		write_tcs_cmd(drv, RSC_DRV_CMD_ADDR, tcs_id, j, cmd->addr);
		write_tcs_cmd(drv, RSC_DRV_CMD_DATA, tcs_id, j, cmd->data);
		// trace_rpmh_send_msg_rcuidle(drv, tcs_id, j, msgid, cmd);
	}

	write_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, cmd_complete);
	cmd_enable |= read_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id);
	write_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id, cmd_enable);
}

/**
 * check_for_req_inflight() - Look to see if conflicting cmds are in flight.
 * @drv: The controller.
 * @tcs: A pointer to the tcs_group used for ACTIVE_ONLY transfers.
 * @msg: The message we want to send, which will contain several addr/data
 *       pairs to program (but few enough that they all fit in one TCS).
 *
 * This will walk through the TCSes in the group and check if any of them
 * appear to be sending to addresses referenced in the message. If it finds
 * one it'll return -EBUSY.
 *
 * Only for use for active-only transfers.
 *
 * Must be called with the drv->lock held since that protects tcs_in_use.
 *
 * Return: 0 if nothing in flight or -EBUSY if we should try again later.
 *         The caller must re-enable interrupts between tries since that's
 *         the only way tcs_is_free() will ever return true and the only way
 *         RSC_DRV_CMD_ENABLE will ever be cleared.
 */
static int check_for_req_inflight(struct rsc_drv *drv, struct tcs_group *tcs,
				  const struct tcs_request *msg)
{
	unsigned long curr_enabled;
	u32 addr;
	int i, j, k;
	int tcs_id = tcs->offset;

	for (i = 0; i < tcs->num_tcs; i++, tcs_id++) {
		if (tcs_is_free(drv, tcs_id))
			continue;

		curr_enabled = read_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id);

		for_each_set_bit(j, &curr_enabled, MAX_CMDS_PER_TCS) {
			addr = read_tcs_cmd(drv, RSC_DRV_CMD_ADDR, tcs_id, j);
			for (k = 0; k < msg->num_cmds; k++) {
				if (addr == msg->cmds[k].addr)
					return -EBUSY;
			}
		}
	}

	return 0;
}

/**
 * find_free_tcs() - Find free tcs in the given tcs_group; only for active.
 * @tcs: A pointer to the active-only tcs_group (or the wake tcs_group if
 *       we borrowed it because there are zero active-only ones).
 *
 * Must be called with the drv->lock held since that protects tcs_in_use.
 *
 * Return: The first tcs that's free.
 */
static int find_free_tcs(struct tcs_group *tcs)
{
	int i;

	for (i = 0; i < tcs->num_tcs; i++) {
		if (tcs_is_free(tcs->drv, tcs->offset + i))
			return tcs->offset + i;
	}

	return -EBUSY;
}

/**
 * claim_tcs_for_req() - Claim a tcs in the given tcs_group; only for active.
 * @drv: The controller.
 * @tcs: The tcs_group used for ACTIVE_ONLY transfers.
 * @msg: The data to be sent.
 *
 * Claims a tcs in the given tcs_group while making sure that no existing cmd
 * is in flight that would conflict with the one in @msg.
 *
 * Context: Must be called with the drv->lock held since that protects
 * tcs_in_use.
 *
 * Return: The id of the claimed tcs or -EBUSY if a matching msg is in flight
 * or the tcs_group is full.
 */
static int claim_tcs_for_req(struct rsc_drv *drv, struct tcs_group *tcs,
			     const struct tcs_request *msg)
{
	int ret;

	/*
	 * The h/w does not like if we send a request to the same address,
	 * when one is already in-flight or being processed.
	 */
	ret = check_for_req_inflight(drv, tcs, msg);
	if (ret)
		return ret;

	return find_free_tcs(tcs);
}

/**
 * rpmh_rsc_send_data() - Write / trigger active-only message.
 * @drv: The controller.
 * @msg: The data to be sent.
 *
 * NOTES:
 * - This is only used for "ACTIVE_ONLY" since the limitations of this
 *   function don't make sense for sleep/wake cases.
 * - To do the transfer, we will grab a whole TCS for ourselves--we don't
 *   try to share. If there are none available we'll wait indefinitely
 *   for a free one.
 * - This function will not wait for the commands to be finished, only for
 *   data to be programmed into the RPMh. See rpmh_tx_done() which will
 *   be called when the transfer is fully complete.
 * - This function must be called with interrupts enabled. If the hardware
 *   is busy doing someone else's transfer we need that transfer to fully
 *   finish so that we can have the hardware, and to fully finish it needs
 *   the interrupt handler to run. If the interrupt is set to run on the
 *   active CPU, this can never happen if interrupts are disabled.
 *
 * Return: 0 on success, -EINVAL on error.
 */
int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg)
{
	struct tcs_group *tcs;
	int tcs_id;
	unsigned long flags;

	tcs = get_tcs_for_msg(drv, msg);
	if (IS_ERR(tcs))
		return PTR_ERR(tcs);

	spin_lock_irqsave(&drv->lock, flags);

	/* Wait forever for a free tcs. It better be there eventually! */
	wait_event_lock_irq(drv->tcs_wait,
			    (tcs_id = claim_tcs_for_req(drv, tcs, msg)) >= 0,
			    drv->lock);

	tcs->req[tcs_id - tcs->offset] = msg;
	set_bit(tcs_id, drv->tcs_in_use);
	if (msg->state == RPMH_ACTIVE_ONLY_STATE && tcs->type != ACTIVE_TCS) {
		/*
		 * Clear previously programmed WAKE commands in selected
		 * repurposed TCS to avoid triggering them. tcs->slots will be
		 * cleaned from rpmh_flush() by invoking rpmh_rsc_invalidate()
		 */
		write_tcs_reg_sync(drv, RSC_DRV_CMD_ENABLE, tcs_id, 0);
		write_tcs_reg_sync(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, 0);
		enable_tcs_irq(drv, tcs_id, true);
	}
	spin_unlock_irqrestore(&drv->lock, flags);

	/*
	 * These two can be done after the lock is released because:
	 * - We marked "tcs_in_use" under lock.
	 * - Once "tcs_in_use" has been marked nobody else could be writing
	 *   to these registers until the interrupt goes off.
	 * - The interrupt can't go off until we trigger w/ the last line
	 *   of __tcs_set_trigger() below.
	 */
	__tcs_buffer_write(drv, tcs_id, 0, msg);
	__tcs_set_trigger(drv, tcs_id, true);

	return 0;
}
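
/*
 * Illustrative usage sketch (added by the editor, not part of the original
 * file): a caller such as the rpmh core builds a request roughly like
 *
 *	struct tcs_cmd cmd = { .addr = res_addr, .data = val, .wait = 1 };
 *	struct tcs_request msg = {
 *		.state = RPMH_ACTIVE_ONLY_STATE,
 *		.wait_for_compl = true,
 *		.num_cmds = 1,
 *		.cmds = &cmd,
 *	};
 *	ret = rpmh_rsc_send_data(drv, &msg);
 *
 * where res_addr and val stand in for a cmd-db resource address and the
 * value being voted.
 */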

/**
 * find_slots() - Find a place to write the given message.
 * @tcs:    The tcs group to search.
 * @msg:    The message we want to find room for.
 * @tcs_id: If we return 0 from the function, we return the global ID of the
 *          TCS to write to here.
 * @cmd_id: If we return 0 from the function, we return the index of
 *          the command array of the returned TCS where the client should
 *          start writing the message.
 *
 * Only for use on sleep/wake TCSes since those are the only ones we maintain
 * tcs->slots for.
 *
 * Return: -ENOMEM if there was no room, else 0.
 */
static int find_slots(struct tcs_group *tcs, const struct tcs_request *msg,
		      int *tcs_id, int *cmd_id)
{
	int slot, offset;
	int i = 0;

	/* Do over, until we can fit the full payload in a single TCS */
	do {
		slot = bitmap_find_next_zero_area(tcs->slots, MAX_TCS_SLOTS,
						  i, msg->num_cmds, 0);
		if (slot >= tcs->num_tcs * tcs->ncpt)
			return -ENOMEM;
		i += tcs->ncpt;
	} while (slot + msg->num_cmds - 1 >= i);

	bitmap_set(tcs->slots, slot, msg->num_cmds);

	offset = slot / tcs->ncpt;
	*tcs_id = offset + tcs->offset;
	*cmd_id = slot % tcs->ncpt;

	return 0;
}
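
/*
 * Illustrative note (added by the editor): slots are numbered across the
 * whole group, ncpt per TCS. With ncpt == 16, for example, slot 18 maps to
 * the second TCS of the group (tcs->offset + 1) at cmd_id 2, per the
 * arithmetic in find_slots() above.
 */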
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714)  * rpmh_rsc_write_ctrl_data() - Write request to controller but don't trigger.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715)  * @drv: The controller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716)  * @msg: The data to be written to the controller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718)  * This should only be called for for sleep/wake state, never active-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719)  * state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721)  * The caller must ensure that no other RPMH actions are happening and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722)  * controller is idle when this function is called since it runs lockless.
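 *
 * Illustrative only (the address and data values below are made up, not
 * taken from this driver): a sleep-state request staged by the rpmh core
 * would look roughly like:
 *
 *	struct tcs_cmd cmd = { .addr = 0x30000, .data = 0x1, .wait = 1 };
 *	struct tcs_request req = {
 *		.state = RPMH_SLEEP_STATE,
 *		.num_cmds = 1,
 *		.cmds = &cmd,
 *	};
 *	rpmh_rsc_write_ctrl_data(drv, &req);
 *
 * The command is only staged in a sleep/wake TCS here; the hardware or
 * firmware triggers it later when entering the low power state.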
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724)  * Return: 0 if no error; else -error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) int rpmh_rsc_write_ctrl_data(struct rsc_drv *drv, const struct tcs_request *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 	struct tcs_group *tcs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 	int tcs_id = 0, cmd_id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 	tcs = get_tcs_for_msg(drv, msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 	if (IS_ERR(tcs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 		return PTR_ERR(tcs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 	/* find the TCS id and the command in the TCS to write to */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 	ret = find_slots(tcs, msg, &tcs_id, &cmd_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 	if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 		__tcs_buffer_write(drv, tcs_id, cmd_id, msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745)  * rpmh_rsc_ctrlr_is_busy() - Check if any of the AMCs are busy.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746)  * @drv: The controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748)  * Checks if any of the AMCs are busy handling ACTIVE sets.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749)  * This is called from the last CPU powering down, before flushing the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750)  * SLEEP and WAKE sets. If any AMC is busy, the controller cannot enter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751)  * power collapse, so the last CPU's PM notification is denied.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753)  * Context: Must be called with the drv->lock held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755)  * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756)  * * False		- AMCs are idle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757)  * * True		- AMCs are busy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) static bool rpmh_rsc_ctrlr_is_busy(struct rsc_drv *drv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	int m;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	struct tcs_group *tcs = &drv->tcs[ACTIVE_TCS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	 * If we made an active request on an RSC that does not have a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 	 * dedicated TCS for active state use, then check the re-purposed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 	 * wake TCSes instead, because wake TCSes were used for the active
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 	 * requests in this case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	if (!tcs->num_tcs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 		tcs = &drv->tcs[WAKE_TCS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 	for (m = tcs->offset; m < tcs->offset + tcs->num_tcs; m++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 		if (!tcs_is_free(drv, m))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782)  * rpmh_rsc_cpu_pm_callback() - Flush sleep/wake sets when the last CPU idles.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783)  * @nfb:    Pointer to the notifier block in struct rsc_drv.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784)  * @action: CPU_PM_ENTER, CPU_PM_ENTER_FAILED, or CPU_PM_EXIT.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785)  * @v:      Unused
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787)  * This function is given to cpu_pm_register_notifier so we can be informed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788)  * about when CPUs go down. When all CPUs go down we know no more active
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789)  * transfers will be started so we write sleep/wake sets. This function gets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790)  * called from cpuidle code paths and also at system suspend time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792)  * If it's the last CPU going down and the AMCs are not busy, this writes the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793)  * cached sleep and wake messages to the TCSes. The firmware then takes care
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794)  * of triggering them when entering the deepest low power modes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796)  * Return: See cpu_pm_register_notifier()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) static int rpmh_rsc_cpu_pm_callback(struct notifier_block *nfb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 				    unsigned long action, void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	struct rsc_drv *drv = container_of(nfb, struct rsc_drv, rsc_pm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	int ret = NOTIFY_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	int cpus_in_pm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	switch (action) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	case CPU_PM_ENTER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 		cpus_in_pm = atomic_inc_return(&drv->cpus_in_pm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 		 * NOTE: comments for num_online_cpus() point out that it's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 		 * only a snapshot so we need to be careful. It should be OK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 		 * for us to use, though.  It's important for us not to miss
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 		 * if we're the last CPU going down so it would only be a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 		 * problem if a CPU went offline right after we did the check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 		 * AND that CPU was not idle AND that CPU was the last non-idle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 		 * CPU. That can't happen. CPUs would have to come out of idle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 		 * before the CPU could go offline.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 		 */
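		/*
		 * Illustration (hypothetical CPU count): with 4 CPUs online,
		 * the first three CPU_PM_ENTER notifications see cpus_in_pm
		 * of 1, 2 and 3 and return NOTIFY_OK below; only the call
		 * that brings cpus_in_pm to 4 falls through to attempt the
		 * sleep/wake flush.
		 */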
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 		if (cpus_in_pm < num_online_cpus())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 			return NOTIFY_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	case CPU_PM_ENTER_FAILED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	case CPU_PM_EXIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 		atomic_dec(&drv->cpus_in_pm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 		return NOTIFY_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 		return NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	 * It's likely we're on the last CPU. Grab the drv->lock and write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	 * out the sleep/wake commands to RPMH hardware. Grabbing the lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	 * means that if we race with another CPU coming up we are still
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	 * guaranteed to be safe. If another CPU came up just after we checked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	 * and has grabbed the lock or started an active transfer then we'll
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	 * notice we're busy and abort. If another CPU comes up after we start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	 * flushing it will be blocked from starting an active transfer until
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	 * we're done flushing. If another CPU starts an active transfer after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	 * we release the lock we're still OK because we're no longer the last
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	 * CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	if (spin_trylock(&drv->lock)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 		if (rpmh_rsc_ctrlr_is_busy(drv) || rpmh_flush(&drv->client))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 			ret = NOTIFY_BAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 		spin_unlock(&drv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 		/* Another CPU must be up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 		return NOTIFY_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	if (ret == NOTIFY_BAD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 		/* Double-check if we're here because someone else is up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 		if (cpus_in_pm < num_online_cpus())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 			ret = NOTIFY_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 			/* We won't be called with CPU_PM_ENTER_FAILED */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 			atomic_dec(&drv->cpus_in_pm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) static int rpmh_probe_tcs_config(struct platform_device *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 				 struct rsc_drv *drv, void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	struct tcs_type_config {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 		u32 type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 		u32 n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	} tcs_cfg[TCS_TYPE_NR] = { { 0 } };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	struct device_node *dn = pdev->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	u32 config, max_tcs, ncpt, offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	int i, ret, n, st = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	struct tcs_group *tcs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	ret = of_property_read_u32(dn, "qcom,tcs-offset", &offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	drv->tcs_base = base + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	config = readl_relaxed(base + DRV_PRNT_CHLD_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 
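	/*
	 * Pull this DRV's TCS count and the commands-per-TCS value (ncpt)
	 * out of the config register. Worked example with the mask/shift
	 * values defined at the top of this file: for drv->id = 2 the
	 * num-TCS field occupies bits [17:12], and ncpt is always taken
	 * from bits [31:27].
	 */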
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	max_tcs = config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	max_tcs &= DRV_NUM_TCS_MASK << (DRV_NUM_TCS_SHIFT * drv->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	max_tcs = max_tcs >> (DRV_NUM_TCS_SHIFT * drv->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	ncpt = config & (DRV_NCPT_MASK << DRV_NCPT_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	ncpt = ncpt >> DRV_NCPT_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	n = of_property_count_u32_elems(dn, "qcom,tcs-config");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	if (n != 2 * TCS_TYPE_NR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 
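	/*
	 * "qcom,tcs-config" is a flat list of <type, count> pairs, one pair
	 * per TCS type. For example (illustrative values, not taken from
	 * this file), a typical RSC node could carry:
	 *
	 *	qcom,tcs-config = <ACTIVE_TCS 2>, <SLEEP_TCS 3>,
	 *			  <WAKE_TCS 3>, <CONTROL_TCS 1>;
	 */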
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	for (i = 0; i < TCS_TYPE_NR; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 		ret = of_property_read_u32_index(dn, "qcom,tcs-config",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 						 i * 2, &tcs_cfg[i].type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 		if (tcs_cfg[i].type >= TCS_TYPE_NR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 		ret = of_property_read_u32_index(dn, "qcom,tcs-config",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 						 i * 2 + 1, &tcs_cfg[i].n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 		if (tcs_cfg[i].n > MAX_TCS_PER_TYPE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	for (i = 0; i < TCS_TYPE_NR; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 		tcs = &drv->tcs[tcs_cfg[i].type];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 		if (tcs->drv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 		tcs->drv = drv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 		tcs->type = tcs_cfg[i].type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 		tcs->num_tcs = tcs_cfg[i].n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 		tcs->ncpt = ncpt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 		if (!tcs->num_tcs || tcs->type == CONTROL_TCS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 		if (st + tcs->num_tcs > max_tcs ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 		    st + tcs->num_tcs >= BITS_PER_BYTE * sizeof(tcs->mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 
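		/*
		 * Build the per-group bitmask of global TCS indices. For
		 * example (hypothetical numbers): 3 TCSes starting at st = 2
		 * give mask = 0b11100, i.e. global TCS IDs 2, 3 and 4.
		 */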
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 		tcs->mask = ((1 << tcs->num_tcs) - 1) << st;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 		tcs->offset = st;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 		st += tcs->num_tcs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	drv->num_tcs = st;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) static int rpmh_rsc_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	struct device_node *dn = pdev->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	struct rsc_drv *drv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	char drv_id[10] = {0};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	int ret, irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	u32 solver_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	void __iomem *base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	 * Even though RPMh doesn't directly use cmd-db, all of its children
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	 * do. To avoid adding this check to our children we'll do it now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	ret = cmd_db_ready();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 		if (ret != -EPROBE_DEFER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 			dev_err(&pdev->dev, "Command DB not available (%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 									ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	if (!drv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	ret = of_property_read_u32(dn, "qcom,drv-id", &drv->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	drv->name = of_get_property(dn, "label", NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	if (!drv->name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 		drv->name = dev_name(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 
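	/*
	 * Each DRV in an RSC has its own register region and completion IRQ,
	 * both selected by drv->id. For example (assuming the usual DT
	 * layout), DRV 2 maps the "drv-2" reg region and requests the third
	 * interrupt listed for the node.
	 */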
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	snprintf(drv_id, ARRAY_SIZE(drv_id), "drv-%d", drv->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, drv_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	base = devm_ioremap_resource(&pdev->dev, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	if (IS_ERR(base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 		return PTR_ERR(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	ret = rpmh_probe_tcs_config(pdev, drv, base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	spin_lock_init(&drv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	init_waitqueue_head(&drv->tcs_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	bitmap_zero(drv->tcs_in_use, MAX_TCS_NR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	irq = platform_get_irq(pdev, drv->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	if (irq < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 		return irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	ret = devm_request_irq(&pdev->dev, irq, tcs_tx_done,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 			       IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 			       drv->name, drv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	 * CPU PM notifications are not required for controllers that support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	 * 'HW solver' mode, where the controller can autonomously execute
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	 * low power modes to power down.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	solver_config = readl_relaxed(base + DRV_SOLVER_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	solver_config &= DRV_HW_SOLVER_MASK << DRV_HW_SOLVER_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	solver_config = solver_config >> DRV_HW_SOLVER_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	if (!solver_config) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 		drv->rsc_pm.notifier_call = rpmh_rsc_cpu_pm_callback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 		cpu_pm_register_notifier(&drv->rsc_pm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	/* Enable IRQs for the active TCSes so requests can be sent immediately */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	writel_relaxed(drv->tcs[ACTIVE_TCS].mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 		       drv->tcs_base + RSC_DRV_IRQ_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	spin_lock_init(&drv->client.cache_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	INIT_LIST_HEAD(&drv->client.cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	INIT_LIST_HEAD(&drv->client.batch_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	dev_set_drvdata(&pdev->dev, drv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	return devm_of_platform_populate(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) static const struct of_device_id rpmh_drv_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	{ .compatible = "qcom,rpmh-rsc", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	{ }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) MODULE_DEVICE_TABLE(of, rpmh_drv_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) static struct platform_driver rpmh_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 	.probe = rpmh_rsc_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	.driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 		  .name = "rpmh",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 		  .of_match_table = rpmh_drv_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 		  .suppress_bind_attrs = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) static int __init rpmh_driver_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	return platform_driver_register(&rpmh_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) arch_initcall(rpmh_driver_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) MODULE_DESCRIPTION("Qualcomm Technologies, Inc. RPMh Driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) MODULE_LICENSE("GPL v2");