// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2011-2017, The Linux Foundation
 */

#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/pm_runtime.h>
#include "slimbus.h"

/* Manager registers */
#define MGR_CFG 0x200
#define MGR_STATUS 0x204
#define MGR_INT_EN 0x210
#define MGR_INT_STAT 0x214
#define MGR_INT_CLR 0x218
#define MGR_TX_MSG 0x230
#define MGR_RX_MSG 0x270
#define MGR_IE_STAT 0x2F0
#define MGR_VE_STAT 0x300
#define MGR_CFG_ENABLE 1

/* Framer registers */
#define FRM_CFG 0x400
#define FRM_STAT 0x404
#define FRM_INT_EN 0x410
#define FRM_INT_STAT 0x414
#define FRM_INT_CLR 0x418
#define FRM_WAKEUP 0x41C
#define FRM_CLKCTL_DONE 0x420
#define FRM_IE_STAT 0x430
#define FRM_VE_STAT 0x440

/* Interface registers */
#define INTF_CFG 0x600
#define INTF_STAT 0x604
#define INTF_INT_EN 0x610
#define INTF_INT_STAT 0x614
#define INTF_INT_CLR 0x618
#define INTF_IE_STAT 0x630
#define INTF_VE_STAT 0x640

/* Interrupt status bits */
#define MGR_INT_TX_NACKED_2 BIT(25)
#define MGR_INT_MSG_BUF_CONTE BIT(26)
#define MGR_INT_RX_MSG_RCVD BIT(30)
#define MGR_INT_TX_MSG_SENT BIT(31)

/* Framer config register settings */
#define FRM_ACTIVE 1
#define CLK_GEAR 7
#define ROOT_FREQ 11
#define REF_CLK_GEAR 15
#define INTR_WAKE 19

#define SLIM_MSG_ASM_FIRST_WORD(l, mt, mc, dt, ad) \
        ((l) | ((mt) << 5) | ((mc) << 8) | ((dt) << 15) | ((ad) << 16))

#define SLIM_ROOT_FREQ 24576000
#define QCOM_SLIM_AUTOSUSPEND 1000

/* MAX message size over control channel */
#define SLIM_MSGQ_BUF_LEN 40
#define QCOM_TX_MSGS 2
#define QCOM_RX_MSGS 8
#define QCOM_BUF_ALLOC_RETRIES 10

#define CFG_PORT(r, v) ((v) ? CFG_PORT_V2(r) : CFG_PORT_V1(r))

/* V2 Component registers */
#define CFG_PORT_V2(r) ((r ## _V2))
#define COMP_CFG_V2 4
#define COMP_TRUST_CFG_V2 0x3000

/* V1 Component registers */
#define CFG_PORT_V1(r) ((r ## _V1))
#define COMP_CFG_V1 0
#define COMP_TRUST_CFG_V1 0x14

/* Resource group info for manager, and non-ported generic device-components */
#define EE_MGR_RSC_GRP (1 << 10)
#define EE_NGD_2 (2 << 6)
#define EE_NGD_1 0

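/*
 * struct slim_ctrl_buf - circular buffer of fixed-size message slots shared
 * between the interrupt handler and its producers/consumers. @head and @tail
 * index slots of @sl_sz bytes within @base; @n is the number of slots.
 */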
struct slim_ctrl_buf {
        void *base;
        spinlock_t lock;
        int head;
        int tail;
        int sl_sz;
        int n;
};

struct qcom_slim_ctrl {
        struct slim_controller ctrl;
        struct slim_framer framer;
        struct device *dev;
        void __iomem *base;
        void __iomem *slew_reg;

        struct slim_ctrl_buf rx;
        struct slim_ctrl_buf tx;

        struct completion **wr_comp;
        int irq;
        struct workqueue_struct *rxwq;
        struct work_struct wd;
        struct clk *rclk;
        struct clk *hclk;
};

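/*
 * qcom_slim_queue_tx() - copy a message into the manager TX FIFO at @tx_reg,
 * one 32-bit word at a time, rounding the length up to whole words.
 */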
static void qcom_slim_queue_tx(struct qcom_slim_ctrl *ctrl, void *buf,
                               u8 len, u32 tx_reg)
{
        int count = (len + 3) >> 2;

        __iowrite32_copy(ctrl->base + tx_reg, buf, count);

        /* Ensure order of subsequent writes */
        mb();
}

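/*
 * slim_alloc_rxbuf() - reserve the next free slot in the RX ring.
 * Called from the interrupt handler; returns NULL if the ring is full.
 */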
static void *slim_alloc_rxbuf(struct qcom_slim_ctrl *ctrl)
{
        unsigned long flags;
        int idx;

        spin_lock_irqsave(&ctrl->rx.lock, flags);
        if ((ctrl->rx.tail + 1) % ctrl->rx.n == ctrl->rx.head) {
                spin_unlock_irqrestore(&ctrl->rx.lock, flags);
                dev_err(ctrl->dev, "RX QUEUE full!");
                return NULL;
        }
        idx = ctrl->rx.tail;
        ctrl->rx.tail = (ctrl->rx.tail + 1) % ctrl->rx.n;
        spin_unlock_irqrestore(&ctrl->rx.lock, flags);

        return ctrl->rx.base + (idx * ctrl->rx.sl_sz);
}

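/*
 * slim_ack_txn() - retire the oldest in-flight TX message and wake up the
 * sender waiting on its completion in qcom_xfer_msg().
 */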
static void slim_ack_txn(struct qcom_slim_ctrl *ctrl, int err)
{
        struct completion *comp;
        unsigned long flags;
        int idx;

        spin_lock_irqsave(&ctrl->tx.lock, flags);
        idx = ctrl->tx.head;
        ctrl->tx.head = (ctrl->tx.head + 1) % ctrl->tx.n;
        spin_unlock_irqrestore(&ctrl->tx.lock, flags);

        comp = ctrl->wr_comp[idx];
        ctrl->wr_comp[idx] = NULL;

        complete(comp);
}

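/*
 * qcom_slim_handle_tx_irq() - TX half of the interrupt handler: acknowledge
 * sent messages, dump manager/framer/interface state when a message is
 * NACKed, then complete the pending transaction.
 */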
static irqreturn_t qcom_slim_handle_tx_irq(struct qcom_slim_ctrl *ctrl,
                                           u32 stat)
{
        int err = 0;

        if (stat & MGR_INT_TX_MSG_SENT)
                writel_relaxed(MGR_INT_TX_MSG_SENT,
                               ctrl->base + MGR_INT_CLR);

        if (stat & MGR_INT_TX_NACKED_2) {
                u32 mgr_stat = readl_relaxed(ctrl->base + MGR_STATUS);
                u32 mgr_ie_stat = readl_relaxed(ctrl->base + MGR_IE_STAT);
                u32 frm_stat = readl_relaxed(ctrl->base + FRM_STAT);
                u32 frm_cfg = readl_relaxed(ctrl->base + FRM_CFG);
                u32 frm_intr_stat = readl_relaxed(ctrl->base + FRM_INT_STAT);
                u32 frm_ie_stat = readl_relaxed(ctrl->base + FRM_IE_STAT);
                u32 intf_stat = readl_relaxed(ctrl->base + INTF_STAT);
                u32 intf_intr_stat = readl_relaxed(ctrl->base + INTF_INT_STAT);
                u32 intf_ie_stat = readl_relaxed(ctrl->base + INTF_IE_STAT);

                writel_relaxed(MGR_INT_TX_NACKED_2, ctrl->base + MGR_INT_CLR);

                dev_err(ctrl->dev, "TX Nack MGR:int:0x%x, stat:0x%x\n",
                        stat, mgr_stat);
                dev_err(ctrl->dev, "TX Nack MGR:ie:0x%x\n", mgr_ie_stat);
                dev_err(ctrl->dev, "TX Nack FRM:int:0x%x, stat:0x%x\n",
                        frm_intr_stat, frm_stat);
                dev_err(ctrl->dev, "TX Nack FRM:cfg:0x%x, ie:0x%x\n",
                        frm_cfg, frm_ie_stat);
                dev_err(ctrl->dev, "TX Nack INTF:intr:0x%x, stat:0x%x\n",
                        intf_intr_stat, intf_stat);
                dev_err(ctrl->dev, "TX Nack INTF:ie:0x%x\n",
                        intf_ie_stat);
                err = -ENOTCONN;
        }

        slim_ack_txn(ctrl, err);

        return IRQ_HANDLED;
}

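/*
 * qcom_slim_handle_rx_irq() - RX half of the interrupt handler: read the
 * received message out of MGR_RX_MSG. Report-present messages are copied
 * into the RX ring and deferred to the workqueue; replies are handed
 * straight to the SLIMbus core.
 */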
static irqreturn_t qcom_slim_handle_rx_irq(struct qcom_slim_ctrl *ctrl,
                                           u32 stat)
{
        u32 *rx_buf, pkt[10];
        bool q_rx = false;
        u8 mc, mt, len;

        pkt[0] = readl_relaxed(ctrl->base + MGR_RX_MSG);
        mt = SLIM_HEADER_GET_MT(pkt[0]);
        len = SLIM_HEADER_GET_RL(pkt[0]);
        mc = SLIM_HEADER_GET_MC(pkt[0]>>8);

        /*
         * This message cannot be handled in the ISR, so
         * let the workqueue handle it.
         */
        if (mt == SLIM_MSG_MT_CORE && mc == SLIM_MSG_MC_REPORT_PRESENT) {
                rx_buf = (u32 *)slim_alloc_rxbuf(ctrl);
                if (!rx_buf) {
                        dev_err(ctrl->dev, "dropping RX:0x%x due to RX full\n",
                                pkt[0]);
                        goto rx_ret_irq;
                }
                rx_buf[0] = pkt[0];

        } else {
                rx_buf = pkt;
        }

        __ioread32_copy(rx_buf + 1, ctrl->base + MGR_RX_MSG + 4,
                        DIV_ROUND_UP(len, 4));

        switch (mc) {

        case SLIM_MSG_MC_REPORT_PRESENT:
                q_rx = true;
                break;
        case SLIM_MSG_MC_REPLY_INFORMATION:
        case SLIM_MSG_MC_REPLY_VALUE:
                slim_msg_response(&ctrl->ctrl, (u8 *)(rx_buf + 1),
                                  (u8)(*rx_buf >> 24), (len - 4));
                break;
        default:
                dev_err(ctrl->dev, "unsupported MC,%x MT:%x\n",
                        mc, mt);
                break;
        }
rx_ret_irq:
        writel(MGR_INT_RX_MSG_RCVD, ctrl->base +
               MGR_INT_CLR);
        if (q_rx)
                queue_work(ctrl->rxwq, &ctrl->wd);

        return IRQ_HANDLED;
}

static irqreturn_t qcom_slim_interrupt(int irq, void *d)
{
        struct qcom_slim_ctrl *ctrl = d;
        u32 stat = readl_relaxed(ctrl->base + MGR_INT_STAT);
        int ret = IRQ_NONE;

        if (stat & MGR_INT_TX_MSG_SENT || stat & MGR_INT_TX_NACKED_2)
                ret = qcom_slim_handle_tx_irq(ctrl, stat);

        if (stat & MGR_INT_RX_MSG_RCVD)
                ret = qcom_slim_handle_rx_irq(ctrl, stat);

        return ret;
}

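/*
 * qcom_clk_pause_wakeup() - bring the bus out of clock pause: re-enable the
 * interface and core clocks and the IRQ, toggle FRM_WAKEUP, then wait a few
 * superframes for slaves to report in.
 */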
static int qcom_clk_pause_wakeup(struct slim_controller *sctrl)
{
        struct qcom_slim_ctrl *ctrl = dev_get_drvdata(sctrl->dev);

        clk_prepare_enable(ctrl->hclk);
        clk_prepare_enable(ctrl->rclk);
        enable_irq(ctrl->irq);

        writel_relaxed(1, ctrl->base + FRM_WAKEUP);
        /* Make sure framer wakeup write goes through before ISR fires */
        mb();
        /*
         * HW Workaround: Currently, the slave reports lost-sync messages
         * after SLIMbus comes out of clock pause.
         * Transactions with the slave fail before the slave reports that
         * message, so give the report some time to arrive.
         * SLIMbus wakes up in clock gear 10 at 24.576MHz. With each superframe
         * being 250 usecs, we wait for 5-10 superframes here to ensure
         * we get the message.
         */
        usleep_range(1250, 2500);
        return 0;
}

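/*
 * slim_alloc_txbuf() - reserve the next free slot in the TX ring and record
 * the caller's completion so the TX interrupt can signal it later.
 * Returns NULL if no slot is currently available.
 */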
static void *slim_alloc_txbuf(struct qcom_slim_ctrl *ctrl,
                              struct slim_msg_txn *txn,
                              struct completion *done)
{
        unsigned long flags;
        int idx;

        spin_lock_irqsave(&ctrl->tx.lock, flags);
        if (((ctrl->tx.head + 1) % ctrl->tx.n) == ctrl->tx.tail) {
                spin_unlock_irqrestore(&ctrl->tx.lock, flags);
                dev_err(ctrl->dev, "controller TX buf unavailable");
                return NULL;
        }
        idx = ctrl->tx.tail;
        ctrl->wr_comp[idx] = done;
        ctrl->tx.tail = (ctrl->tx.tail + 1) % ctrl->tx.n;

        spin_unlock_irqrestore(&ctrl->tx.lock, flags);

        return ctrl->tx.base + (idx * ctrl->tx.sl_sz);
}

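/*
 * qcom_xfer_msg() - SLIMbus core xfer_msg callback: assemble the message
 * header and payload in a TX slot, push it to the manager TX FIFO and wait
 * for the sent/NACK interrupt to complete the transaction.
 */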
static int qcom_xfer_msg(struct slim_controller *sctrl,
                         struct slim_msg_txn *txn)
{
        struct qcom_slim_ctrl *ctrl = dev_get_drvdata(sctrl->dev);
        DECLARE_COMPLETION_ONSTACK(done);
        void *pbuf = slim_alloc_txbuf(ctrl, txn, &done);
        unsigned long ms = txn->rl + HZ;
        u8 *puc;
        int ret = 0, timeout, retries = QCOM_BUF_ALLOC_RETRIES;
        u8 la = txn->la;
        u32 *head;
        /* HW expects length field to be excluded */
        txn->rl--;

        /* Retry until a TX buffer becomes available */
        if (!pbuf) {
                while (retries--) {
                        usleep_range(10000, 15000);
                        pbuf = slim_alloc_txbuf(ctrl, txn, &done);
                        if (pbuf)
                                break;
                }
        }

        if (retries < 0 && !pbuf)
                return -ENOMEM;

        puc = (u8 *)pbuf;
        head = (u32 *)pbuf;

        if (txn->dt == SLIM_MSG_DEST_LOGICALADDR) {
                *head = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt,
                                                txn->mc, 0, la);
                puc += 3;
        } else {
                *head = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt,
                                                txn->mc, 1, la);
                puc += 2;
        }

        if (slim_tid_txn(txn->mt, txn->mc))
                *(puc++) = txn->tid;

        if (slim_ec_txn(txn->mt, txn->mc)) {
                *(puc++) = (txn->ec & 0xFF);
                *(puc++) = (txn->ec >> 8) & 0xFF;
        }

        if (txn->msg && txn->msg->wbuf)
                memcpy(puc, txn->msg->wbuf, txn->msg->num_bytes);

        qcom_slim_queue_tx(ctrl, head, txn->rl, MGR_TX_MSG);
        timeout = wait_for_completion_timeout(&done, msecs_to_jiffies(ms));

        if (!timeout) {
                dev_err(ctrl->dev, "TX timed out:MC:0x%x,mt:0x%x", txn->mc,
                        txn->mt);
                ret = -ETIMEDOUT;
        }

        return ret;
}

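/*
 * qcom_set_laddr() - assign a logical address to a device by sending an
 * ASSIGN_LOGICAL_ADDRESS message carrying the device's enumeration address
 * and the chosen logical address.
 */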
static int qcom_set_laddr(struct slim_controller *sctrl,
                          struct slim_eaddr *ead, u8 laddr)
{
        struct qcom_slim_ctrl *ctrl = dev_get_drvdata(sctrl->dev);
        struct {
                __be16 manf_id;
                __be16 prod_code;
                u8 dev_index;
                u8 instance;
                u8 laddr;
        } __packed p;
        struct slim_val_inf msg = {0};
        DEFINE_SLIM_EDEST_TXN(txn, SLIM_MSG_MC_ASSIGN_LOGICAL_ADDRESS,
                              10, laddr, &msg);
        int ret;

        p.manf_id = cpu_to_be16(ead->manf_id);
        p.prod_code = cpu_to_be16(ead->prod_code);
        p.dev_index = ead->dev_index;
        p.instance = ead->instance;
        p.laddr = laddr;

        msg.wbuf = (void *)&p;
        msg.num_bytes = 7;
        ret = slim_do_transfer(&ctrl->ctrl, &txn);

        if (ret)
                dev_err(ctrl->dev, "set LA:0x%x failed:ret:%d\n",
                        laddr, ret);
        return ret;
}

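/*
 * slim_get_current_rxbuf() - pop one message from the RX ring into @buf.
 * Returns -ENODATA when the ring is empty.
 */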
static int slim_get_current_rxbuf(struct qcom_slim_ctrl *ctrl, void *buf)
{
        unsigned long flags;

        spin_lock_irqsave(&ctrl->rx.lock, flags);
        if (ctrl->rx.tail == ctrl->rx.head) {
                spin_unlock_irqrestore(&ctrl->rx.lock, flags);
                return -ENODATA;
        }
        memcpy(buf, ctrl->rx.base + (ctrl->rx.head * ctrl->rx.sl_sz),
               ctrl->rx.sl_sz);

        ctrl->rx.head = (ctrl->rx.head + 1) % ctrl->rx.n;
        spin_unlock_irqrestore(&ctrl->rx.lock, flags);

        return 0;
}

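/*
 * qcom_slim_rxwq() - workqueue handler that drains the RX ring. For
 * REPORT_PRESENT messages it extracts the enumeration address and asks the
 * SLIMbus core to assign a logical address.
 */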
static void qcom_slim_rxwq(struct work_struct *work)
{
        u8 buf[SLIM_MSGQ_BUF_LEN];
        u8 mc, mt;
        int ret;
        struct qcom_slim_ctrl *ctrl = container_of(work, struct qcom_slim_ctrl,
                                                   wd);

        while ((slim_get_current_rxbuf(ctrl, buf)) != -ENODATA) {
                mt = SLIM_HEADER_GET_MT(buf[0]);
                mc = SLIM_HEADER_GET_MC(buf[1]);
                if (mt == SLIM_MSG_MT_CORE &&
                    mc == SLIM_MSG_MC_REPORT_PRESENT) {
                        struct slim_eaddr ea;
                        u8 laddr;

                        ea.manf_id = be16_to_cpup((__be16 *)&buf[2]);
                        ea.prod_code = be16_to_cpup((__be16 *)&buf[4]);
                        ea.dev_index = buf[6];
                        ea.instance = buf[7];

                        ret = slim_device_report_present(&ctrl->ctrl, &ea,
                                                         &laddr);
                        if (ret < 0)
                                dev_err(ctrl->dev, "assign laddr failed:%d\n",
                                        ret);
                } else {
                        dev_err(ctrl->dev, "unexpected message:mc:%x, mt:%x\n",
                                mc, mt);
                }
        }
}

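/*
 * qcom_slim_prg_slew() - map the optional "slew" register (if not already
 * mapped) and enable the SLIMbus pad slew rate.
 */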
static void qcom_slim_prg_slew(struct platform_device *pdev,
                               struct qcom_slim_ctrl *ctrl)
{
        if (!ctrl->slew_reg) {
                /* SLEW RATE register for this SLIMbus */
                ctrl->slew_reg = devm_platform_ioremap_resource_byname(pdev, "slew");
                if (IS_ERR(ctrl->slew_reg))
                        return;
        }

        writel_relaxed(1, ctrl->slew_reg);
        /* Make sure SLIMbus-slew rate enabling goes through */
        wmb();
}

static int qcom_slim_probe(struct platform_device *pdev)
{
        struct qcom_slim_ctrl *ctrl;
        struct slim_controller *sctrl;
        struct resource *slim_mem;
        int ret, ver;

        ctrl = devm_kzalloc(&pdev->dev, sizeof(*ctrl), GFP_KERNEL);
        if (!ctrl)
                return -ENOMEM;

        ctrl->hclk = devm_clk_get(&pdev->dev, "iface");
        if (IS_ERR(ctrl->hclk))
                return PTR_ERR(ctrl->hclk);

        ctrl->rclk = devm_clk_get(&pdev->dev, "core");
        if (IS_ERR(ctrl->rclk))
                return PTR_ERR(ctrl->rclk);

        ret = clk_set_rate(ctrl->rclk, SLIM_ROOT_FREQ);
        if (ret) {
                dev_err(&pdev->dev, "ref-clock set-rate failed:%d\n", ret);
                return ret;
        }

        ctrl->irq = platform_get_irq(pdev, 0);
        if (ctrl->irq < 0) {
                dev_err(&pdev->dev, "no slimbus IRQ\n");
                return ctrl->irq;
        }

        sctrl = &ctrl->ctrl;
        sctrl->dev = &pdev->dev;
        ctrl->dev = &pdev->dev;
        platform_set_drvdata(pdev, ctrl);
        dev_set_drvdata(ctrl->dev, ctrl);

        slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl");
        ctrl->base = devm_ioremap_resource(ctrl->dev, slim_mem);
        if (IS_ERR(ctrl->base))
                return PTR_ERR(ctrl->base);

        sctrl->set_laddr = qcom_set_laddr;
        sctrl->xfer_msg = qcom_xfer_msg;
        sctrl->wakeup = qcom_clk_pause_wakeup;
        ctrl->tx.n = QCOM_TX_MSGS;
        ctrl->tx.sl_sz = SLIM_MSGQ_BUF_LEN;
        ctrl->rx.n = QCOM_RX_MSGS;
        ctrl->rx.sl_sz = SLIM_MSGQ_BUF_LEN;
        ctrl->wr_comp = kcalloc(QCOM_TX_MSGS, sizeof(struct completion *),
                                GFP_KERNEL);
        if (!ctrl->wr_comp)
                return -ENOMEM;

        spin_lock_init(&ctrl->rx.lock);
        spin_lock_init(&ctrl->tx.lock);
        INIT_WORK(&ctrl->wd, qcom_slim_rxwq);
        ctrl->rxwq = create_singlethread_workqueue("qcom_slim_rx");
        if (!ctrl->rxwq) {
                dev_err(ctrl->dev, "Failed to start Rx WQ\n");
                return -ENOMEM;
        }

        ctrl->framer.rootfreq = SLIM_ROOT_FREQ / 8;
        ctrl->framer.superfreq =
                ctrl->framer.rootfreq / SLIM_CL_PER_SUPERFRAME_DIV8;
        sctrl->a_framer = &ctrl->framer;
        sctrl->clkgear = SLIM_MAX_CLK_GEAR;

        qcom_slim_prg_slew(pdev, ctrl);

        ret = devm_request_irq(&pdev->dev, ctrl->irq, qcom_slim_interrupt,
                               IRQF_TRIGGER_HIGH, "qcom_slim_irq", ctrl);
        if (ret) {
                dev_err(&pdev->dev, "request IRQ failed\n");
                goto err_request_irq_failed;
        }

        ret = clk_prepare_enable(ctrl->hclk);
        if (ret)
                goto err_hclk_enable_failed;

        ret = clk_prepare_enable(ctrl->rclk);
        if (ret)
                goto err_rclk_enable_failed;

        ctrl->tx.base = devm_kcalloc(&pdev->dev, ctrl->tx.n, ctrl->tx.sl_sz,
                                     GFP_KERNEL);
        if (!ctrl->tx.base) {
                ret = -ENOMEM;
                goto err;
        }

        ctrl->rx.base = devm_kcalloc(&pdev->dev, ctrl->rx.n, ctrl->rx.sl_sz,
                                     GFP_KERNEL);
        if (!ctrl->rx.base) {
                ret = -ENOMEM;
                goto err;
        }

        /* Register with framework before enabling frame, clock */
        ret = slim_register_controller(&ctrl->ctrl);
        if (ret) {
                dev_err(ctrl->dev, "error adding controller\n");
                goto err;
        }

        ver = readl_relaxed(ctrl->base);
        /* Version info in 16 MSbits */
        ver >>= 16;
        /* Component register initialization */
        writel(1, ctrl->base + CFG_PORT(COMP_CFG, ver));
        writel((EE_MGR_RSC_GRP | EE_NGD_2 | EE_NGD_1),
               ctrl->base + CFG_PORT(COMP_TRUST_CFG, ver));

        writel((MGR_INT_TX_NACKED_2 |
                MGR_INT_MSG_BUF_CONTE | MGR_INT_RX_MSG_RCVD |
                MGR_INT_TX_MSG_SENT), ctrl->base + MGR_INT_EN);
        writel(1, ctrl->base + MGR_CFG);
        /* Framer register initialization */
        writel((1 << INTR_WAKE) | (0xA << REF_CLK_GEAR) |
               (0xA << CLK_GEAR) | (1 << ROOT_FREQ) | (1 << FRM_ACTIVE) | 1,
               ctrl->base + FRM_CFG);
        writel(MGR_CFG_ENABLE, ctrl->base + MGR_CFG);
        writel(1, ctrl->base + INTF_CFG);
        writel(1, ctrl->base + CFG_PORT(COMP_CFG, ver));

        pm_runtime_use_autosuspend(&pdev->dev);
        pm_runtime_set_autosuspend_delay(&pdev->dev, QCOM_SLIM_AUTOSUSPEND);
        pm_runtime_set_active(&pdev->dev);
        pm_runtime_mark_last_busy(&pdev->dev);
        pm_runtime_enable(&pdev->dev);

        dev_dbg(ctrl->dev, "QCOM SB controller is up:ver:0x%x!\n", ver);
        return 0;

err:
        clk_disable_unprepare(ctrl->rclk);
err_rclk_enable_failed:
        clk_disable_unprepare(ctrl->hclk);
err_hclk_enable_failed:
err_request_irq_failed:
        destroy_workqueue(ctrl->rxwq);
        return ret;
}

static int qcom_slim_remove(struct platform_device *pdev)
{
        struct qcom_slim_ctrl *ctrl = platform_get_drvdata(pdev);

        pm_runtime_disable(&pdev->dev);
        slim_unregister_controller(&ctrl->ctrl);
        clk_disable_unprepare(ctrl->rclk);
        clk_disable_unprepare(ctrl->hclk);
        destroy_workqueue(ctrl->rxwq);
        return 0;
}

/*
 * If PM_RUNTIME is not defined, these 2 functions become helper
 * functions to be called from system suspend/resume.
 */
#ifdef CONFIG_PM
static int qcom_slim_runtime_suspend(struct device *device)
{
        struct qcom_slim_ctrl *ctrl = dev_get_drvdata(device);
        int ret;

        dev_dbg(device, "pm_runtime: suspending...\n");
        ret = slim_ctrl_clk_pause(&ctrl->ctrl, false, SLIM_CLK_UNSPECIFIED);
        if (ret) {
                dev_err(device, "clk pause not entered:%d", ret);
        } else {
                disable_irq(ctrl->irq);
                clk_disable_unprepare(ctrl->hclk);
                clk_disable_unprepare(ctrl->rclk);
        }
        return ret;
}

static int qcom_slim_runtime_resume(struct device *device)
{
        struct qcom_slim_ctrl *ctrl = dev_get_drvdata(device);
        int ret = 0;

        dev_dbg(device, "pm_runtime: resuming...\n");
        ret = slim_ctrl_clk_pause(&ctrl->ctrl, true, 0);
        if (ret)
                dev_err(device, "clk pause not exited:%d", ret);
        return ret;
}
#endif

#ifdef CONFIG_PM_SLEEP
static int qcom_slim_suspend(struct device *dev)
{
        int ret = 0;

        if (!pm_runtime_enabled(dev) ||
            (!pm_runtime_suspended(dev))) {
                dev_dbg(dev, "system suspend");
                ret = qcom_slim_runtime_suspend(dev);
        }

        return ret;
}

static int qcom_slim_resume(struct device *dev)
{
        if (!pm_runtime_enabled(dev) || !pm_runtime_suspended(dev)) {
                int ret;

                dev_dbg(dev, "system resume");
                ret = qcom_slim_runtime_resume(dev);
                if (!ret) {
                        pm_runtime_mark_last_busy(dev);
                        pm_request_autosuspend(dev);
                }
                return ret;
        }
        return 0;
}
#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops qcom_slim_dev_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(qcom_slim_suspend, qcom_slim_resume)
        SET_RUNTIME_PM_OPS(
                qcom_slim_runtime_suspend,
                qcom_slim_runtime_resume,
                NULL
        )
};

static const struct of_device_id qcom_slim_dt_match[] = {
        { .compatible = "qcom,slim", },
        { .compatible = "qcom,apq8064-slim", },
        {}
};

static struct platform_driver qcom_slim_driver = {
        .probe = qcom_slim_probe,
        .remove = qcom_slim_remove,
        .driver = {
                .name = "qcom_slim_ctrl",
                .of_match_table = qcom_slim_dt_match,
                .pm = &qcom_slim_dev_pm_ops,
        },
};
module_platform_driver(qcom_slim_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Qualcomm SLIMbus Controller");