Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2018 MediaTek Inc.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>

#define SPIS_IRQ_EN_REG		0x0
#define SPIS_IRQ_CLR_REG	0x4
#define SPIS_IRQ_ST_REG		0x8
#define SPIS_IRQ_MASK_REG	0xc
#define SPIS_CFG_REG		0x10
#define SPIS_RX_DATA_REG	0x14
#define SPIS_TX_DATA_REG	0x18
#define SPIS_RX_DST_REG		0x1c
#define SPIS_TX_SRC_REG		0x20
#define SPIS_DMA_CFG_REG	0x30
#define SPIS_SOFT_RST_REG	0x40

/* SPIS_IRQ_EN_REG */
#define DMA_DONE_EN		BIT(7)
#define DATA_DONE_EN		BIT(2)
#define RSTA_DONE_EN		BIT(1)
#define CMD_INVALID_EN		BIT(0)

/* SPIS_IRQ_ST_REG */
#define DMA_DONE_ST		BIT(7)
#define DATA_DONE_ST		BIT(2)
#define RSTA_DONE_ST		BIT(1)
#define CMD_INVALID_ST		BIT(0)

/* SPIS_IRQ_MASK_REG */
#define DMA_DONE_MASK		BIT(7)
#define DATA_DONE_MASK		BIT(2)
#define RSTA_DONE_MASK		BIT(1)
#define CMD_INVALID_MASK	BIT(0)

/* SPIS_CFG_REG */
#define SPIS_TX_ENDIAN		BIT(7)
#define SPIS_RX_ENDIAN		BIT(6)
#define SPIS_TXMSBF		BIT(5)
#define SPIS_RXMSBF		BIT(4)
#define SPIS_CPHA		BIT(3)
#define SPIS_CPOL		BIT(2)
#define SPIS_TX_EN		BIT(1)
#define SPIS_RX_EN		BIT(0)

/* SPIS_DMA_CFG_REG */
#define TX_DMA_TRIG_EN		BIT(31)
#define TX_DMA_EN		BIT(30)
#define RX_DMA_EN		BIT(29)
#define TX_DMA_LEN		0xfffff

/* SPIS_SOFT_RST_REG */
#define SPIS_DMA_ADDR_EN	BIT(1)
#define SPIS_SOFT_RST		BIT(0)

#define MTK_SPI_SLAVE_MAX_FIFO_SIZE 512U

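/*
 * Driver-private state: register base, clock, and the completion used to
 * hand results from the IRQ handler back to the transfer path.
 */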
struct mtk_spi_slave {
	struct device *dev;
	void __iomem *base;
	struct clk *spi_clk;
	struct completion xfer_done;
	struct spi_transfer *cur_transfer;
	bool slave_aborted;
};

static const struct of_device_id mtk_spi_slave_of_match[] = {
	{ .compatible = "mediatek,mt2712-spi-slave", },
	{}
};
MODULE_DEVICE_TABLE(of, mtk_spi_slave_of_match);

static void mtk_spi_slave_disable_dma(struct mtk_spi_slave *mdata)
{
	u32 reg_val;

	reg_val = readl(mdata->base + SPIS_DMA_CFG_REG);
	reg_val &= ~RX_DMA_EN;
	reg_val &= ~TX_DMA_EN;
	writel(reg_val, mdata->base + SPIS_DMA_CFG_REG);
}

static void mtk_spi_slave_disable_xfer(struct mtk_spi_slave *mdata)
{
	u32 reg_val;

	reg_val = readl(mdata->base + SPIS_CFG_REG);
	reg_val &= ~SPIS_TX_EN;
	reg_val &= ~SPIS_RX_EN;
	writel(reg_val, mdata->base + SPIS_CFG_REG);
}

static int mtk_spi_slave_wait_for_completion(struct mtk_spi_slave *mdata)
{
	if (wait_for_completion_interruptible(&mdata->xfer_done) ||
	    mdata->slave_aborted) {
		dev_err(mdata->dev, "interrupted\n");
		return -EINTR;
	}

	return 0;
}

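/*
 * Apply the per-message SPI mode requested for this device: clock polarity,
 * clock phase and bit order. The FIFO endianness bits are always cleared.
 */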
static int mtk_spi_slave_prepare_message(struct spi_controller *ctlr,
					 struct spi_message *msg)
{
	struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
	struct spi_device *spi = msg->spi;
	bool cpha, cpol;
	u32 reg_val;

	cpha = spi->mode & SPI_CPHA ? 1 : 0;
	cpol = spi->mode & SPI_CPOL ? 1 : 0;

	reg_val = readl(mdata->base + SPIS_CFG_REG);
	if (cpha)
		reg_val |= SPIS_CPHA;
	else
		reg_val &= ~SPIS_CPHA;
	if (cpol)
		reg_val |= SPIS_CPOL;
	else
		reg_val &= ~SPIS_CPOL;

	if (spi->mode & SPI_LSB_FIRST)
		reg_val &= ~(SPIS_TXMSBF | SPIS_RXMSBF);
	else
		reg_val |= SPIS_TXMSBF | SPIS_RXMSBF;

	reg_val &= ~SPIS_TX_ENDIAN;
	reg_val &= ~SPIS_RX_ENDIAN;
	writel(reg_val, mdata->base + SPIS_CFG_REG);

	return 0;
}

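/*
 * PIO path for transfers that fit in the 512-byte FIFO: push any TX data a
 * word at a time, then wait for the interrupt handler to signal completion
 * (RX data is drained from the FIFO in the handler).
 */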
static int mtk_spi_slave_fifo_transfer(struct spi_controller *ctlr,
				       struct spi_device *spi,
				       struct spi_transfer *xfer)
{
	struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
	int reg_val, cnt, remainder, ret;

	writel(SPIS_SOFT_RST, mdata->base + SPIS_SOFT_RST_REG);

	reg_val = readl(mdata->base + SPIS_CFG_REG);
	if (xfer->rx_buf)
		reg_val |= SPIS_RX_EN;
	if (xfer->tx_buf)
		reg_val |= SPIS_TX_EN;
	writel(reg_val, mdata->base + SPIS_CFG_REG);

	cnt = xfer->len / 4;
	if (xfer->tx_buf)
		iowrite32_rep(mdata->base + SPIS_TX_DATA_REG,
			      xfer->tx_buf, cnt);

	remainder = xfer->len % 4;
	if (xfer->tx_buf && remainder > 0) {
		reg_val = 0;
		memcpy(&reg_val, xfer->tx_buf + cnt * 4, remainder);
		writel(reg_val, mdata->base + SPIS_TX_DATA_REG);
	}

	ret = mtk_spi_slave_wait_for_completion(mdata);
	if (ret) {
		mtk_spi_slave_disable_xfer(mdata);
		writel(SPIS_SOFT_RST, mdata->base + SPIS_SOFT_RST_REG);
	}

	return ret;
}

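/*
 * DMA path for transfers larger than the FIFO: map the buffers, program the
 * source/destination addresses and the length, arm the DMA engine, then wait
 * for the DMA-done interrupt. On success the buffers are unmapped in the
 * interrupt handler; on failure they are unmapped in the error path below.
 */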
static int mtk_spi_slave_dma_transfer(struct spi_controller *ctlr,
				      struct spi_device *spi,
				      struct spi_transfer *xfer)
{
	struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
	struct device *dev = mdata->dev;
	int reg_val, ret;

	writel(SPIS_SOFT_RST, mdata->base + SPIS_SOFT_RST_REG);

	if (xfer->tx_buf) {
		/* tx_buf is a const void* where we need a void * for
		 * the dma mapping
		 */
		void *nonconst_tx = (void *)xfer->tx_buf;

		xfer->tx_dma = dma_map_single(dev, nonconst_tx,
					      xfer->len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, xfer->tx_dma)) {
			ret = -ENOMEM;
			goto disable_transfer;
		}
	}

	if (xfer->rx_buf) {
		xfer->rx_dma = dma_map_single(dev, xfer->rx_buf,
					      xfer->len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, xfer->rx_dma)) {
			ret = -ENOMEM;
			goto unmap_txdma;
		}
	}

	writel(xfer->tx_dma, mdata->base + SPIS_TX_SRC_REG);
	writel(xfer->rx_dma, mdata->base + SPIS_RX_DST_REG);

	writel(SPIS_DMA_ADDR_EN, mdata->base + SPIS_SOFT_RST_REG);

	/* enable config reg tx rx_enable */
	reg_val = readl(mdata->base + SPIS_CFG_REG);
	if (xfer->tx_buf)
		reg_val |= SPIS_TX_EN;
	if (xfer->rx_buf)
		reg_val |= SPIS_RX_EN;
	writel(reg_val, mdata->base + SPIS_CFG_REG);

	/* config dma */
	reg_val = 0;
	reg_val |= (xfer->len - 1) & TX_DMA_LEN;
	writel(reg_val, mdata->base + SPIS_DMA_CFG_REG);

	reg_val = readl(mdata->base + SPIS_DMA_CFG_REG);
	if (xfer->tx_buf)
		reg_val |= TX_DMA_EN;
	if (xfer->rx_buf)
		reg_val |= RX_DMA_EN;
	reg_val |= TX_DMA_TRIG_EN;
	writel(reg_val, mdata->base + SPIS_DMA_CFG_REG);

	ret = mtk_spi_slave_wait_for_completion(mdata);
	if (ret)
		goto unmap_rxdma;

	return 0;

unmap_rxdma:
	if (xfer->rx_buf)
		dma_unmap_single(dev, xfer->rx_dma,
				 xfer->len, DMA_FROM_DEVICE);

unmap_txdma:
	if (xfer->tx_buf)
		dma_unmap_single(dev, xfer->tx_dma,
				 xfer->len, DMA_TO_DEVICE);

disable_transfer:
	mtk_spi_slave_disable_dma(mdata);
	mtk_spi_slave_disable_xfer(mdata);
	writel(SPIS_SOFT_RST, mdata->base + SPIS_SOFT_RST_REG);

	return ret;
}

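/*
 * Reset the completion state for this transfer, then use DMA for transfers
 * larger than the 512-byte FIFO and the PIO path otherwise.
 */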
static int mtk_spi_slave_transfer_one(struct spi_controller *ctlr,
				      struct spi_device *spi,
				      struct spi_transfer *xfer)
{
	struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);

	reinit_completion(&mdata->xfer_done);
	mdata->slave_aborted = false;
	mdata->cur_transfer = xfer;

	if (xfer->len > MTK_SPI_SLAVE_MAX_FIFO_SIZE)
		return mtk_spi_slave_dma_transfer(ctlr, spi, xfer);
	else
		return mtk_spi_slave_fifo_transfer(ctlr, spi, xfer);
}

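/*
 * Per-device setup: program the interrupt enable and mask registers and make
 * sure DMA and TX/RX are disabled before the first transfer.
 */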
static int mtk_spi_slave_setup(struct spi_device *spi)
{
	struct mtk_spi_slave *mdata = spi_controller_get_devdata(spi->master);
	u32 reg_val;

	reg_val = DMA_DONE_EN | DATA_DONE_EN |
		  RSTA_DONE_EN | CMD_INVALID_EN;
	writel(reg_val, mdata->base + SPIS_IRQ_EN_REG);

	reg_val = DMA_DONE_MASK | DATA_DONE_MASK |
		  RSTA_DONE_MASK | CMD_INVALID_MASK;
	writel(reg_val, mdata->base + SPIS_IRQ_MASK_REG);

	mtk_spi_slave_disable_dma(mdata);
	mtk_spi_slave_disable_xfer(mdata);

	return 0;
}

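/* Called by the SPI slave core when a protocol handler cancels a queued message. */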
static int mtk_slave_abort(struct spi_controller *ctlr)
{
	struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);

	mdata->slave_aborted = true;
	complete(&mdata->xfer_done);

	return 0;
}

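/*
 * Interrupt handler: acknowledge the raised status bits, unmap the DMA
 * buffers (DMA path) or drain the RX FIFO (PIO path) for the current
 * transfer, and wake up the thread waiting in transfer_one().
 */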
static irqreturn_t mtk_spi_slave_interrupt(int irq, void *dev_id)
{
	struct spi_controller *ctlr = dev_id;
	struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
	struct spi_transfer *trans = mdata->cur_transfer;
	u32 int_status, reg_val, cnt, remainder;

	int_status = readl(mdata->base + SPIS_IRQ_ST_REG);
	writel(int_status, mdata->base + SPIS_IRQ_CLR_REG);

	if (!trans)
		return IRQ_NONE;

	if ((int_status & DMA_DONE_ST) &&
	    ((int_status & DATA_DONE_ST) ||
	    (int_status & RSTA_DONE_ST))) {
		writel(SPIS_SOFT_RST, mdata->base + SPIS_SOFT_RST_REG);

		if (trans->tx_buf)
			dma_unmap_single(mdata->dev, trans->tx_dma,
					 trans->len, DMA_TO_DEVICE);
		if (trans->rx_buf)
			dma_unmap_single(mdata->dev, trans->rx_dma,
					 trans->len, DMA_FROM_DEVICE);

		mtk_spi_slave_disable_dma(mdata);
		mtk_spi_slave_disable_xfer(mdata);
	}

	if ((!(int_status & DMA_DONE_ST)) &&
	    ((int_status & DATA_DONE_ST) ||
	    (int_status & RSTA_DONE_ST))) {
		cnt = trans->len / 4;
		if (trans->rx_buf)
			ioread32_rep(mdata->base + SPIS_RX_DATA_REG,
				     trans->rx_buf, cnt);
		remainder = trans->len % 4;
		if (trans->rx_buf && remainder > 0) {
			reg_val = readl(mdata->base + SPIS_RX_DATA_REG);
			memcpy(trans->rx_buf + (cnt * 4),
			       &reg_val, remainder);
		}

		mtk_spi_slave_disable_xfer(mdata);
	}

	if (int_status & CMD_INVALID_ST) {
		dev_warn(&ctlr->dev, "cmd invalid\n");
		return IRQ_NONE;
	}

	mdata->cur_transfer = NULL;
	complete(&mdata->xfer_done);

	return IRQ_HANDLED;
}

static int mtk_spi_slave_probe(struct platform_device *pdev)
{
	struct spi_controller *ctlr;
	struct mtk_spi_slave *mdata;
	int irq, ret;

	ctlr = spi_alloc_slave(&pdev->dev, sizeof(*mdata));
	if (!ctlr) {
		dev_err(&pdev->dev, "failed to alloc spi slave\n");
		return -ENOMEM;
	}

	ctlr->auto_runtime_pm = true;
	ctlr->dev.of_node = pdev->dev.of_node;
	ctlr->mode_bits = SPI_CPOL | SPI_CPHA;
	ctlr->mode_bits |= SPI_LSB_FIRST;

	ctlr->prepare_message = mtk_spi_slave_prepare_message;
	ctlr->transfer_one = mtk_spi_slave_transfer_one;
	ctlr->setup = mtk_spi_slave_setup;
	ctlr->slave_abort = mtk_slave_abort;

	mdata = spi_controller_get_devdata(ctlr);

	platform_set_drvdata(pdev, ctlr);

	init_completion(&mdata->xfer_done);
	mdata->dev = &pdev->dev;
	mdata->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(mdata->base)) {
		ret = PTR_ERR(mdata->base);
		goto err_put_ctlr;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto err_put_ctlr;
	}

	ret = devm_request_irq(&pdev->dev, irq, mtk_spi_slave_interrupt,
			       IRQF_TRIGGER_NONE, dev_name(&pdev->dev), ctlr);
	if (ret) {
		dev_err(&pdev->dev, "failed to register irq (%d)\n", ret);
		goto err_put_ctlr;
	}

	mdata->spi_clk = devm_clk_get(&pdev->dev, "spi");
	if (IS_ERR(mdata->spi_clk)) {
		ret = PTR_ERR(mdata->spi_clk);
		dev_err(&pdev->dev, "failed to get spi-clk: %d\n", ret);
		goto err_put_ctlr;
	}

	ret = clk_prepare_enable(mdata->spi_clk);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to enable spi_clk (%d)\n", ret);
		goto err_put_ctlr;
	}

	pm_runtime_enable(&pdev->dev);

	ret = devm_spi_register_controller(&pdev->dev, ctlr);
	if (ret) {
		dev_err(&pdev->dev,
			"failed to register slave controller(%d)\n", ret);
		clk_disable_unprepare(mdata->spi_clk);
		goto err_disable_runtime_pm;
	}

	clk_disable_unprepare(mdata->spi_clk);

	return 0;

err_disable_runtime_pm:
	pm_runtime_disable(&pdev->dev);
err_put_ctlr:
	spi_controller_put(ctlr);

	return ret;
}

static int mtk_spi_slave_remove(struct platform_device *pdev)
{
	pm_runtime_disable(&pdev->dev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int mtk_spi_slave_suspend(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);
	struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
	int ret;

	ret = spi_controller_suspend(ctlr);
	if (ret)
		return ret;

	if (!pm_runtime_suspended(dev))
		clk_disable_unprepare(mdata->spi_clk);

	return ret;
}

static int mtk_spi_slave_resume(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);
	struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
	int ret;

	if (!pm_runtime_suspended(dev)) {
		ret = clk_prepare_enable(mdata->spi_clk);
		if (ret < 0) {
			dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
			return ret;
		}
	}

	ret = spi_controller_resume(ctlr);
	if (ret < 0)
		clk_disable_unprepare(mdata->spi_clk);

	return ret;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM
static int mtk_spi_slave_runtime_suspend(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);
	struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);

	clk_disable_unprepare(mdata->spi_clk);

	return 0;
}

static int mtk_spi_slave_runtime_resume(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);
	struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
	int ret;

	ret = clk_prepare_enable(mdata->spi_clk);
	if (ret < 0) {
		dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
		return ret;
	}

	return 0;
}
#endif /* CONFIG_PM */

static const struct dev_pm_ops mtk_spi_slave_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(mtk_spi_slave_suspend, mtk_spi_slave_resume)
	SET_RUNTIME_PM_OPS(mtk_spi_slave_runtime_suspend,
			   mtk_spi_slave_runtime_resume, NULL)
};

static struct platform_driver mtk_spi_slave_driver = {
	.driver = {
		.name = "mtk-spi-slave",
		.pm	= &mtk_spi_slave_pm,
		.of_match_table = mtk_spi_slave_of_match,
	},
	.probe = mtk_spi_slave_probe,
	.remove = mtk_spi_slave_remove,
};

module_platform_driver(mtk_spi_slave_driver);

MODULE_DESCRIPTION("MTK SPI Slave Controller driver");
MODULE_AUTHOR("Leilk Liu <leilk.liu@mediatek.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:mtk-spi-slave");
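
For context, the slave_abort hook above is reached through spi_slave_abort() when a slave protocol handler cancels a message it has queued. Below is a minimal sketch of that usage pattern, loosely modeled on the kernel's in-tree spi-slave-time handler; the demo_* names, the 64-byte buffer, and the surrounding structure are illustrative assumptions, not part of this repository.

/*
 * Usage sketch (illustrative only): how a slave protocol handler queues a
 * transfer against a slave controller like the one above and cancels it.
 */
#include <linux/spi/spi.h>

struct demo_state {
	struct spi_device	*spi;
	struct spi_transfer	xfer;
	struct spi_message	msg;
	u8			rx[64];
};

static void demo_complete(void *context);

/* Queue one RX transfer; it finishes once the remote SPI master clocks it. */
static int demo_submit(struct demo_state *st)
{
	st->xfer.rx_buf = st->rx;
	st->xfer.len = sizeof(st->rx);

	spi_message_init_with_transfers(&st->msg, &st->xfer, 1);
	st->msg.complete = demo_complete;
	st->msg.context = st;

	return spi_async(st->spi, &st->msg);
}

static void demo_complete(void *context)
{
	struct demo_state *st = context;

	if (st->msg.status)	/* e.g. -EINTR after spi_slave_abort() */
		return;

	/* process st->rx here, then queue the next transfer */
	demo_submit(st);
}

/* Teardown: cancel a still-pending message; this reaches mtk_slave_abort(). */
static void demo_teardown(struct demo_state *st)
{
	spi_slave_abort(st->spi);
}

The resubmit-from-completion pattern keeps a transfer queued whenever the remote master may start clocking data, which is why the controller driver completes xfer_done from its interrupt handler rather than polling.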