Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards
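The listing below is the MediaTek SPI controller driver (drivers/spi/spi-mt65xx.c) as carried in this tree.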

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015 MediaTek Inc.
 * Author: Leilk Liu <leilk.liu@mediatek.com>
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/platform_data/spi-mt65xx.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/dma-mapping.h>

#define SPI_CFG0_REG                      0x0000
#define SPI_CFG1_REG                      0x0004
#define SPI_TX_SRC_REG                    0x0008
#define SPI_RX_DST_REG                    0x000c
#define SPI_TX_DATA_REG                   0x0010
#define SPI_RX_DATA_REG                   0x0014
#define SPI_CMD_REG                       0x0018
#define SPI_STATUS0_REG                   0x001c
#define SPI_PAD_SEL_REG                   0x0024
#define SPI_CFG2_REG                      0x0028
#define SPI_TX_SRC_REG_64                 0x002c
#define SPI_RX_DST_REG_64                 0x0030

#define SPI_CFG0_SCK_HIGH_OFFSET          0
#define SPI_CFG0_SCK_LOW_OFFSET           8
#define SPI_CFG0_CS_HOLD_OFFSET           16
#define SPI_CFG0_CS_SETUP_OFFSET          24
#define SPI_ADJUST_CFG0_CS_HOLD_OFFSET    0
#define SPI_ADJUST_CFG0_CS_SETUP_OFFSET   16

#define SPI_CFG1_CS_IDLE_OFFSET           0
#define SPI_CFG1_PACKET_LOOP_OFFSET       8
#define SPI_CFG1_PACKET_LENGTH_OFFSET     16
#define SPI_CFG1_GET_TICK_DLY_OFFSET      30

#define SPI_CFG1_CS_IDLE_MASK             0xff
#define SPI_CFG1_PACKET_LOOP_MASK         0xff00
#define SPI_CFG1_PACKET_LENGTH_MASK       0x3ff0000
#define SPI_CFG2_SCK_HIGH_OFFSET          0
#define SPI_CFG2_SCK_LOW_OFFSET           16

#define SPI_CMD_ACT                  BIT(0)
#define SPI_CMD_RESUME               BIT(1)
#define SPI_CMD_RST                  BIT(2)
#define SPI_CMD_PAUSE_EN             BIT(4)
#define SPI_CMD_DEASSERT             BIT(5)
#define SPI_CMD_SAMPLE_SEL           BIT(6)
#define SPI_CMD_CS_POL               BIT(7)
#define SPI_CMD_CPHA                 BIT(8)
#define SPI_CMD_CPOL                 BIT(9)
#define SPI_CMD_RX_DMA               BIT(10)
#define SPI_CMD_TX_DMA               BIT(11)
#define SPI_CMD_TXMSBF               BIT(12)
#define SPI_CMD_RXMSBF               BIT(13)
#define SPI_CMD_RX_ENDIAN            BIT(14)
#define SPI_CMD_TX_ENDIAN            BIT(15)
#define SPI_CMD_FINISH_IE            BIT(16)
#define SPI_CMD_PAUSE_IE             BIT(17)

#define MT8173_SPI_MAX_PAD_SEL 3

#define MTK_SPI_PAUSE_INT_STATUS 0x2

#define MTK_SPI_IDLE 0
#define MTK_SPI_PAUSED 1

#define MTK_SPI_MAX_FIFO_SIZE 32U
#define MTK_SPI_PACKET_SIZE 1024
#define MTK_SPI_32BITS_MASK  (0xffffffff)

#define DMA_ADDR_EXT_BITS (36)
#define DMA_ADDR_DEF_BITS (32)

struct mtk_spi_compatible {
	bool need_pad_sel;
	/* Must explicitly send dummy TX bytes to do an RX-only transfer */
	bool must_tx;
	/* Some IC designs adjust the cfg registers to improve timing accuracy */
	bool enhance_timing;
	/* Some ICs support DMA address extension */
	bool dma_ext;
};

struct mtk_spi {
	void __iomem *base;
	u32 state;
	int pad_num;
	u32 *pad_sel;
	struct clk *parent_clk, *sel_clk, *spi_clk;
	struct spi_transfer *cur_transfer;
	u32 xfer_len;
	u32 num_xfered;
	struct scatterlist *tx_sgl, *rx_sgl;
	u32 tx_sgl_len, rx_sgl_len;
	const struct mtk_spi_compatible *dev_comp;
};

static const struct mtk_spi_compatible mtk_common_compat;

static const struct mtk_spi_compatible mt2712_compat = {
	.must_tx = true,
};

static const struct mtk_spi_compatible mt6765_compat = {
	.need_pad_sel = true,
	.must_tx = true,
	.enhance_timing = true,
	.dma_ext = true,
};

static const struct mtk_spi_compatible mt7622_compat = {
	.must_tx = true,
	.enhance_timing = true,
};

static const struct mtk_spi_compatible mt8173_compat = {
	.need_pad_sel = true,
	.must_tx = true,
};

static const struct mtk_spi_compatible mt8183_compat = {
	.need_pad_sel = true,
	.must_tx = true,
	.enhance_timing = true,
};

/*
 * Default chip configuration, used unless the platform supplies
 * its own through spi_device->controller_data.
 */
static const struct mtk_chip_config mtk_default_chip_info = {
	.sample_sel = 0,
};
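/*
 * Illustrative, not from this tree: a board could override the default
 * above by pointing spi_board_info.controller_data at its own
 * mtk_chip_config, e.g. to move the RX sample point on controllers with
 * enhance_timing (the names below are hypothetical):
 *
 *	static const struct mtk_chip_config my_spi_chip_cfg = {
 *		.sample_sel = 1,
 *	};
 *	// ... .controller_data = (void *)&my_spi_chip_cfg in spi_board_info
 *
 * mtk_spi_prepare_message() maps sample_sel onto SPI_CMD_SAMPLE_SEL.
 */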

static const struct of_device_id mtk_spi_of_match[] = {
	{ .compatible = "mediatek,mt2701-spi",
		.data = (void *)&mtk_common_compat,
	},
	{ .compatible = "mediatek,mt2712-spi",
		.data = (void *)&mt2712_compat,
	},
	{ .compatible = "mediatek,mt6589-spi",
		.data = (void *)&mtk_common_compat,
	},
	{ .compatible = "mediatek,mt6765-spi",
		.data = (void *)&mt6765_compat,
	},
	{ .compatible = "mediatek,mt7622-spi",
		.data = (void *)&mt7622_compat,
	},
	{ .compatible = "mediatek,mt7629-spi",
		.data = (void *)&mt7622_compat,
	},
	{ .compatible = "mediatek,mt8135-spi",
		.data = (void *)&mtk_common_compat,
	},
	{ .compatible = "mediatek,mt8173-spi",
		.data = (void *)&mt8173_compat,
	},
	{ .compatible = "mediatek,mt8183-spi",
		.data = (void *)&mt8183_compat,
	},
	{ .compatible = "mediatek,mt8192-spi",
		.data = (void *)&mt6765_compat,
	},
	{}
};
MODULE_DEVICE_TABLE(of, mtk_spi_of_match);

static void mtk_spi_reset(struct mtk_spi *mdata)
{
	u32 reg_val;

	/* set the software reset bit in SPI_CMD_REG. */
	reg_val = readl(mdata->base + SPI_CMD_REG);
	reg_val |= SPI_CMD_RST;
	writel(reg_val, mdata->base + SPI_CMD_REG);

	reg_val = readl(mdata->base + SPI_CMD_REG);
	reg_val &= ~SPI_CMD_RST;
	writel(reg_val, mdata->base + SPI_CMD_REG);
}

static int mtk_spi_prepare_message(struct spi_master *master,
				   struct spi_message *msg)
{
	u16 cpha, cpol;
	u32 reg_val;
	struct spi_device *spi = msg->spi;
	struct mtk_chip_config *chip_config = spi->controller_data;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	cpha = spi->mode & SPI_CPHA ? 1 : 0;
	cpol = spi->mode & SPI_CPOL ? 1 : 0;

	reg_val = readl(mdata->base + SPI_CMD_REG);
	if (cpha)
		reg_val |= SPI_CMD_CPHA;
	else
		reg_val &= ~SPI_CMD_CPHA;
	if (cpol)
		reg_val |= SPI_CMD_CPOL;
	else
		reg_val &= ~SPI_CMD_CPOL;

	/* set TX/RX bit order: MSB-first unless SPI_LSB_FIRST is requested */
	if (spi->mode & SPI_LSB_FIRST) {
		reg_val &= ~SPI_CMD_TXMSBF;
		reg_val &= ~SPI_CMD_RXMSBF;
	} else {
		reg_val |= SPI_CMD_TXMSBF;
		reg_val |= SPI_CMD_RXMSBF;
	}

	/* set the tx/rx endian */
#ifdef __LITTLE_ENDIAN
	reg_val &= ~SPI_CMD_TX_ENDIAN;
	reg_val &= ~SPI_CMD_RX_ENDIAN;
#else
	reg_val |= SPI_CMD_TX_ENDIAN;
	reg_val |= SPI_CMD_RX_ENDIAN;
#endif

	if (mdata->dev_comp->enhance_timing) {
		/* set CS polarity */
		if (spi->mode & SPI_CS_HIGH)
			reg_val |= SPI_CMD_CS_POL;
		else
			reg_val &= ~SPI_CMD_CS_POL;

		if (chip_config->sample_sel)
			reg_val |= SPI_CMD_SAMPLE_SEL;
		else
			reg_val &= ~SPI_CMD_SAMPLE_SEL;
	}

	/* set finish and pause interrupt always enable */
	reg_val |= SPI_CMD_FINISH_IE | SPI_CMD_PAUSE_IE;

	/* disable dma mode */
	reg_val &= ~(SPI_CMD_TX_DMA | SPI_CMD_RX_DMA);

	/* disable deassert mode */
	reg_val &= ~SPI_CMD_DEASSERT;

	writel(reg_val, mdata->base + SPI_CMD_REG);

	/* pad select */
	if (mdata->dev_comp->need_pad_sel)
		writel(mdata->pad_sel[spi->chip_select],
		       mdata->base + SPI_PAD_SEL_REG);

	return 0;
}

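/*
 * There is no dedicated chip-select register here: selecting a device is
 * done by enabling pause mode (SPI_CMD_PAUSE_EN), which keeps CS asserted
 * between the transfers of a message; releasing it clears pause mode,
 * marks the controller idle and resets the FIFOs via mtk_spi_reset().
 */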
static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
{
	u32 reg_val;
	struct mtk_spi *mdata = spi_master_get_devdata(spi->master);

	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	reg_val = readl(mdata->base + SPI_CMD_REG);
	if (!enable) {
		reg_val |= SPI_CMD_PAUSE_EN;
		writel(reg_val, mdata->base + SPI_CMD_REG);
	} else {
		reg_val &= ~SPI_CMD_PAUSE_EN;
		writel(reg_val, mdata->base + SPI_CMD_REG);
		mdata->state = MTK_SPI_IDLE;
		mtk_spi_reset(mdata);
	}
}

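/*
 * Worked example with illustrative numbers: spi_clk at 100 MHz and a
 * requested speed of 3 MHz gives div = DIV_ROUND_UP(100000000, 3000000)
 * = 34 and sck_time = (34 + 1) / 2 = 17 source-clock ticks per SCK
 * half-period, i.e. an effective SCK of roughly 100 MHz / 34 ~= 2.94 MHz
 * (never above the requested rate), with cs_time = 34 ticks used for the
 * CS setup/hold/idle fields.  The registers hold count-minus-one values,
 * hence the "- 1" below.  Requests at or above spi_clk / 2 run at div = 1.
 */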
static void mtk_spi_prepare_transfer(struct spi_master *master,
				     struct spi_transfer *xfer)
{
	u32 spi_clk_hz, div, sck_time, cs_time, reg_val;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	spi_clk_hz = clk_get_rate(mdata->spi_clk);
	if (xfer->speed_hz < spi_clk_hz / 2)
		div = DIV_ROUND_UP(spi_clk_hz, xfer->speed_hz);
	else
		div = 1;

	sck_time = (div + 1) / 2;
	cs_time = sck_time * 2;

	if (mdata->dev_comp->enhance_timing) {
		reg_val = (((sck_time - 1) & 0xffff)
			   << SPI_CFG2_SCK_HIGH_OFFSET);
		reg_val |= (((sck_time - 1) & 0xffff)
			   << SPI_CFG2_SCK_LOW_OFFSET);
		writel(reg_val, mdata->base + SPI_CFG2_REG);
		reg_val = (((cs_time - 1) & 0xffff)
			   << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
		reg_val |= (((cs_time - 1) & 0xffff)
			   << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
		writel(reg_val, mdata->base + SPI_CFG0_REG);
	} else {
		reg_val = (((sck_time - 1) & 0xff)
			   << SPI_CFG0_SCK_HIGH_OFFSET);
		reg_val |= (((sck_time - 1) & 0xff) << SPI_CFG0_SCK_LOW_OFFSET);
		reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET);
		reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG0_CS_SETUP_OFFSET);
		writel(reg_val, mdata->base + SPI_CFG0_REG);
	}

	reg_val = readl(mdata->base + SPI_CFG1_REG);
	reg_val &= ~SPI_CFG1_CS_IDLE_MASK;
	reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG1_CS_IDLE_OFFSET);
	writel(reg_val, mdata->base + SPI_CFG1_REG);
}

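/*
 * The hardware moves data as packet_size bytes repeated packet_loop
 * times, with packet_size capped at MTK_SPI_PACKET_SIZE (1024) and
 * packet_loop held in an 8-bit field.  Example: xfer_len = 3072 gives
 * packet_size = 1024 and packet_loop = 3.  For the split to be exact, any
 * xfer_len above 1024 must be a multiple of 1024, which
 * mtk_spi_update_mdata_len() guarantees on the DMA path (FIFO transfers
 * are at most 32 bytes anyway).
 */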
static void mtk_spi_setup_packet(struct spi_master *master)
{
	u32 packet_size, packet_loop, reg_val;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	packet_size = min_t(u32, mdata->xfer_len, MTK_SPI_PACKET_SIZE);
	packet_loop = mdata->xfer_len / packet_size;

	reg_val = readl(mdata->base + SPI_CFG1_REG);
	reg_val &= ~(SPI_CFG1_PACKET_LENGTH_MASK | SPI_CFG1_PACKET_LOOP_MASK);
	reg_val |= (packet_size - 1) << SPI_CFG1_PACKET_LENGTH_OFFSET;
	reg_val |= (packet_loop - 1) << SPI_CFG1_PACKET_LOOP_OFFSET;
	writel(reg_val, mdata->base + SPI_CFG1_REG);
}

static void mtk_spi_enable_transfer(struct spi_master *master)
{
	u32 cmd;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	cmd = readl(mdata->base + SPI_CMD_REG);
	if (mdata->state == MTK_SPI_IDLE)
		cmd |= SPI_CMD_ACT;
	else
		cmd |= SPI_CMD_RESUME;
	writel(cmd, mdata->base + SPI_CMD_REG);
}

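/*
 * mtk_spi_get_mult_delta() returns the part of a scatterlist segment that
 * is not a whole multiple of MTK_SPI_PACKET_SIZE, and
 * mtk_spi_update_mdata_len() uses it to split each DMA round into a
 * 1024-byte-multiple chunk plus a remainder handled in the next round.
 * Example: tx_sgl_len = 2500 gives mult_delta = 452, so this round moves
 * xfer_len = 2048 and leaves tx_sgl_len = 452 for the next interrupt.
 */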
static int mtk_spi_get_mult_delta(u32 xfer_len)
{
	u32 mult_delta;

	if (xfer_len > MTK_SPI_PACKET_SIZE)
		mult_delta = xfer_len % MTK_SPI_PACKET_SIZE;
	else
		mult_delta = 0;

	return mult_delta;
}

static void mtk_spi_update_mdata_len(struct spi_master *master)
{
	int mult_delta;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	if (mdata->tx_sgl_len && mdata->rx_sgl_len) {
		if (mdata->tx_sgl_len > mdata->rx_sgl_len) {
			mult_delta = mtk_spi_get_mult_delta(mdata->rx_sgl_len);
			mdata->xfer_len = mdata->rx_sgl_len - mult_delta;
			mdata->rx_sgl_len = mult_delta;
			mdata->tx_sgl_len -= mdata->xfer_len;
		} else {
			mult_delta = mtk_spi_get_mult_delta(mdata->tx_sgl_len);
			mdata->xfer_len = mdata->tx_sgl_len - mult_delta;
			mdata->tx_sgl_len = mult_delta;
			mdata->rx_sgl_len -= mdata->xfer_len;
		}
	} else if (mdata->tx_sgl_len) {
		mult_delta = mtk_spi_get_mult_delta(mdata->tx_sgl_len);
		mdata->xfer_len = mdata->tx_sgl_len - mult_delta;
		mdata->tx_sgl_len = mult_delta;
	} else if (mdata->rx_sgl_len) {
		mult_delta = mtk_spi_get_mult_delta(mdata->rx_sgl_len);
		mdata->xfer_len = mdata->rx_sgl_len - mult_delta;
		mdata->rx_sgl_len = mult_delta;
	}
}

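/*
 * Program the DMA source/destination addresses: the low 32 bits always go
 * into SPI_TX_SRC_REG/SPI_RX_DST_REG; on parts with dma_ext the upper
 * bits of a (up to) 36-bit DMA address go into the *_REG_64 registers.
 */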
static void mtk_spi_setup_dma_addr(struct spi_master *master,
				   struct spi_transfer *xfer)
{
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	if (mdata->tx_sgl) {
		writel((u32)(xfer->tx_dma & MTK_SPI_32BITS_MASK),
		       mdata->base + SPI_TX_SRC_REG);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (mdata->dev_comp->dma_ext)
			writel((u32)(xfer->tx_dma >> 32),
			       mdata->base + SPI_TX_SRC_REG_64);
#endif
	}

	if (mdata->rx_sgl) {
		writel((u32)(xfer->rx_dma & MTK_SPI_32BITS_MASK),
		       mdata->base + SPI_RX_DST_REG);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (mdata->dev_comp->dma_ext)
			writel((u32)(xfer->rx_dma >> 32),
			       mdata->base + SPI_RX_DST_REG_64);
#endif
	}
}

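/*
 * FIFO (PIO) path: used for transfers that fit the 32-byte FIFO or fail
 * the DMA alignment check.  The TX FIFO is filled here and the transfer
 * is kicked off; completion, RX draining and any further refills happen
 * in mtk_spi_interrupt().  Returning 1 from transfer_one tells the SPI
 * core the transfer is still in flight and will be completed later via
 * spi_finalize_current_transfer().
 */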
static int mtk_spi_fifo_transfer(struct spi_master *master,
				 struct spi_device *spi,
				 struct spi_transfer *xfer)
{
	int cnt, remainder;
	u32 reg_val;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	mdata->cur_transfer = xfer;
	mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, xfer->len);
	mdata->num_xfered = 0;
	mtk_spi_prepare_transfer(master, xfer);
	mtk_spi_setup_packet(master);

	if (xfer->tx_buf) {
		cnt = xfer->len / 4;
		iowrite32_rep(mdata->base + SPI_TX_DATA_REG, xfer->tx_buf, cnt);
		remainder = xfer->len % 4;
		if (remainder > 0) {
			reg_val = 0;
			memcpy(&reg_val, xfer->tx_buf + (cnt * 4), remainder);
			writel(reg_val, mdata->base + SPI_TX_DATA_REG);
		}
	}

	mtk_spi_enable_transfer(master);

	return 1;
}

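/*
 * DMA path: only the first TX/RX scatterlist entry is programmed here;
 * mtk_spi_interrupt() walks the remaining entries (and the sub-1024-byte
 * remainders computed by mtk_spi_update_mdata_len()) until both lists are
 * exhausted.  As on the FIFO path, returning 1 defers completion to the
 * interrupt handler.
 */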
static int mtk_spi_dma_transfer(struct spi_master *master,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	int cmd;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	mdata->tx_sgl = NULL;
	mdata->rx_sgl = NULL;
	mdata->tx_sgl_len = 0;
	mdata->rx_sgl_len = 0;
	mdata->cur_transfer = xfer;
	mdata->num_xfered = 0;

	mtk_spi_prepare_transfer(master, xfer);

	cmd = readl(mdata->base + SPI_CMD_REG);
	if (xfer->tx_buf)
		cmd |= SPI_CMD_TX_DMA;
	if (xfer->rx_buf)
		cmd |= SPI_CMD_RX_DMA;
	writel(cmd, mdata->base + SPI_CMD_REG);

	if (xfer->tx_buf)
		mdata->tx_sgl = xfer->tx_sg.sgl;
	if (xfer->rx_buf)
		mdata->rx_sgl = xfer->rx_sg.sgl;

	if (mdata->tx_sgl) {
		xfer->tx_dma = sg_dma_address(mdata->tx_sgl);
		mdata->tx_sgl_len = sg_dma_len(mdata->tx_sgl);
	}
	if (mdata->rx_sgl) {
		xfer->rx_dma = sg_dma_address(mdata->rx_sgl);
		mdata->rx_sgl_len = sg_dma_len(mdata->rx_sgl);
	}

	mtk_spi_update_mdata_len(master);
	mtk_spi_setup_packet(master);
	mtk_spi_setup_dma_addr(master, xfer);
	mtk_spi_enable_transfer(master);

	return 1;
}

static int mtk_spi_transfer_one(struct spi_master *master,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	if (master->can_dma(master, spi, xfer))
		return mtk_spi_dma_transfer(master, spi, xfer);
	else
		return mtk_spi_fifo_transfer(master, spi, xfer);
}

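/*
 * DMA is used only for transfers larger than the 32-byte FIFO whose
 * buffers are 4-byte aligned.  A NULL tx_buf or rx_buf also passes the
 * alignment test ((unsigned long)NULL % 4 == 0), so half-duplex transfers
 * still qualify for DMA.
 */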
static bool mtk_spi_can_dma(struct spi_master *master,
			    struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	/* Buffers for DMA transactions must be 4-byte aligned */
	return (xfer->len > MTK_SPI_MAX_FIFO_SIZE &&
		(unsigned long)xfer->tx_buf % 4 == 0 &&
		(unsigned long)xfer->rx_buf % 4 == 0);
}

static int mtk_spi_setup(struct spi_device *spi)
{
	struct mtk_spi *mdata = spi_master_get_devdata(spi->master);

	if (!spi->controller_data)
		spi->controller_data = (void *)&mtk_default_chip_info;

	if (mdata->dev_comp->need_pad_sel && gpio_is_valid(spi->cs_gpio))
		gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));

	return 0;
}

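/*
 * Single interrupt handler for both paths.  For FIFO transfers it drains
 * the RX FIFO, refills the TX FIFO with the next chunk (at most 32 bytes)
 * and resumes the engine until trans->len bytes have moved.  For DMA
 * transfers it advances through the scatterlists, reprogramming the DMA
 * addresses and packet registers for each chunk, and clears the DMA
 * enable bits before finalizing the transfer.
 */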
static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
{
	u32 cmd, reg_val, cnt, remainder, len;
	struct spi_master *master = dev_id;
	struct mtk_spi *mdata = spi_master_get_devdata(master);
	struct spi_transfer *trans = mdata->cur_transfer;

	reg_val = readl(mdata->base + SPI_STATUS0_REG);
	if (reg_val & MTK_SPI_PAUSE_INT_STATUS)
		mdata->state = MTK_SPI_PAUSED;
	else
		mdata->state = MTK_SPI_IDLE;

	if (!master->can_dma(master, NULL, trans)) {
		if (trans->rx_buf) {
			cnt = mdata->xfer_len / 4;
			ioread32_rep(mdata->base + SPI_RX_DATA_REG,
				     trans->rx_buf + mdata->num_xfered, cnt);
			remainder = mdata->xfer_len % 4;
			if (remainder > 0) {
				reg_val = readl(mdata->base + SPI_RX_DATA_REG);
				memcpy(trans->rx_buf +
					mdata->num_xfered +
					(cnt * 4),
					&reg_val,
					remainder);
			}
		}

		mdata->num_xfered += mdata->xfer_len;
		if (mdata->num_xfered == trans->len) {
			spi_finalize_current_transfer(master);
			return IRQ_HANDLED;
		}

		len = trans->len - mdata->num_xfered;
		mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, len);
		mtk_spi_setup_packet(master);

		cnt = mdata->xfer_len / 4;
		iowrite32_rep(mdata->base + SPI_TX_DATA_REG,
				trans->tx_buf + mdata->num_xfered, cnt);

		remainder = mdata->xfer_len % 4;
		if (remainder > 0) {
			reg_val = 0;
			memcpy(&reg_val,
				trans->tx_buf + (cnt * 4) + mdata->num_xfered,
				remainder);
			writel(reg_val, mdata->base + SPI_TX_DATA_REG);
		}

		mtk_spi_enable_transfer(master);

		return IRQ_HANDLED;
	}

	if (mdata->tx_sgl)
		trans->tx_dma += mdata->xfer_len;
	if (mdata->rx_sgl)
		trans->rx_dma += mdata->xfer_len;

	if (mdata->tx_sgl && (mdata->tx_sgl_len == 0)) {
		mdata->tx_sgl = sg_next(mdata->tx_sgl);
		if (mdata->tx_sgl) {
			trans->tx_dma = sg_dma_address(mdata->tx_sgl);
			mdata->tx_sgl_len = sg_dma_len(mdata->tx_sgl);
		}
	}
	if (mdata->rx_sgl && (mdata->rx_sgl_len == 0)) {
		mdata->rx_sgl = sg_next(mdata->rx_sgl);
		if (mdata->rx_sgl) {
			trans->rx_dma = sg_dma_address(mdata->rx_sgl);
			mdata->rx_sgl_len = sg_dma_len(mdata->rx_sgl);
		}
	}

	if (!mdata->tx_sgl && !mdata->rx_sgl) {
		/* all scatterlist entries done: disable DMA mode */
		cmd = readl(mdata->base + SPI_CMD_REG);
		cmd &= ~SPI_CMD_TX_DMA;
		cmd &= ~SPI_CMD_RX_DMA;
		writel(cmd, mdata->base + SPI_CMD_REG);

		spi_finalize_current_transfer(master);
		return IRQ_HANDLED;
	}

	mtk_spi_update_mdata_len(master);
	mtk_spi_setup_packet(master);
	mtk_spi_setup_dma_addr(master, trans);
	mtk_spi_enable_transfer(master);

	return IRQ_HANDLED;
}

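/*
 * Illustrative device-tree fragment (not from this tree) showing the
 * resources the probe routine below expects.  The node address, interrupt
 * and clock phandles are hypothetical placeholders; only the clock-names
 * ordering ("parent-clk" feeds the "sel-clk" mux via clk_set_parent(),
 * "spi-clk" is the gate whose rate drives mtk_spi_prepare_transfer()) and
 * the optional "mediatek,pad-select" cells (values 0..3) follow the code:
 *
 *	spi@1100a000 {
 *		compatible = "mediatek,mt8173-spi";
 *		reg = <0 0x1100a000 0 0x1000>;
 *		interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_LOW>;
 *		clocks = <&clk_parent>, <&clk_sel_mux>, <&clk_spi_gate>;
 *		clock-names = "parent-clk", "sel-clk", "spi-clk";
 *		mediatek,pad-select = <0>;
 *	};
 */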
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) static int mtk_spi_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) 	struct spi_master *master;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) 	struct mtk_spi *mdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) 	const struct of_device_id *of_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) 	int i, irq, ret, addr_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) 	master = spi_alloc_master(&pdev->dev, sizeof(*mdata));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) 	if (!master) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) 		dev_err(&pdev->dev, "failed to alloc spi master\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) 	master->auto_runtime_pm = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) 	master->dev.of_node = pdev->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) 	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) 	master->set_cs = mtk_spi_set_cs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) 	master->prepare_message = mtk_spi_prepare_message;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) 	master->transfer_one = mtk_spi_transfer_one;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) 	master->can_dma = mtk_spi_can_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) 	master->setup = mtk_spi_setup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) 	of_id = of_match_node(mtk_spi_of_match, pdev->dev.of_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) 	if (!of_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) 		dev_err(&pdev->dev, "failed to probe of_node\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) 		goto err_put_master;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) 	mdata = spi_master_get_devdata(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) 	mdata->dev_comp = of_id->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) 	if (mdata->dev_comp->enhance_timing)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) 		master->mode_bits |= SPI_CS_HIGH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) 	if (mdata->dev_comp->must_tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) 		master->flags = SPI_MASTER_MUST_TX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) 	if (mdata->dev_comp->need_pad_sel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) 		mdata->pad_num = of_property_count_u32_elems(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) 			pdev->dev.of_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) 			"mediatek,pad-select");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) 		if (mdata->pad_num < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) 			dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) 				"No 'mediatek,pad-select' property\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) 			ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) 			goto err_put_master;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) 		mdata->pad_sel = devm_kmalloc_array(&pdev->dev, mdata->pad_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) 						    sizeof(u32), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) 		if (!mdata->pad_sel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) 			ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) 			goto err_put_master;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) 		for (i = 0; i < mdata->pad_num; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) 			of_property_read_u32_index(pdev->dev.of_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) 						   "mediatek,pad-select",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) 						   i, &mdata->pad_sel[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) 			if (mdata->pad_sel[i] > MT8173_SPI_MAX_PAD_SEL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) 				dev_err(&pdev->dev, "wrong pad-sel[%d]: %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) 					i, mdata->pad_sel[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) 				ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) 				goto err_put_master;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) 	platform_set_drvdata(pdev, master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) 	mdata->base = devm_platform_ioremap_resource(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) 	if (IS_ERR(mdata->base)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) 		ret = PTR_ERR(mdata->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) 		goto err_put_master;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) 	irq = platform_get_irq(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) 	if (irq < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) 		ret = irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) 		goto err_put_master;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) 	if (!pdev->dev.dma_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) 		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) 	ret = devm_request_irq(&pdev->dev, irq, mtk_spi_interrupt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) 			       IRQF_TRIGGER_NONE, dev_name(&pdev->dev), master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) 		dev_err(&pdev->dev, "failed to register irq (%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) 		goto err_put_master;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) 	mdata->parent_clk = devm_clk_get(&pdev->dev, "parent-clk");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) 	if (IS_ERR(mdata->parent_clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) 		ret = PTR_ERR(mdata->parent_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) 		dev_err(&pdev->dev, "failed to get parent-clk: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) 		goto err_put_master;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) 	mdata->sel_clk = devm_clk_get(&pdev->dev, "sel-clk");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) 	if (IS_ERR(mdata->sel_clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) 		ret = PTR_ERR(mdata->sel_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) 		dev_err(&pdev->dev, "failed to get sel-clk: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) 		goto err_put_master;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) 	mdata->spi_clk = devm_clk_get(&pdev->dev, "spi-clk");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) 	if (IS_ERR(mdata->spi_clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) 		ret = PTR_ERR(mdata->spi_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) 		dev_err(&pdev->dev, "failed to get spi-clk: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) 		goto err_put_master;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) 	ret = clk_prepare_enable(mdata->spi_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) 		dev_err(&pdev->dev, "failed to enable spi_clk (%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) 		goto err_put_master;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) 	ret = clk_set_parent(mdata->sel_clk, mdata->parent_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) 		dev_err(&pdev->dev, "failed to clk_set_parent (%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) 		clk_disable_unprepare(mdata->spi_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) 		goto err_put_master;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) 	clk_disable_unprepare(mdata->spi_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) 	pm_runtime_enable(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) 	ret = devm_spi_register_master(&pdev->dev, master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) 		dev_err(&pdev->dev, "failed to register master (%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) 		goto err_disable_runtime_pm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) 	if (mdata->dev_comp->need_pad_sel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) 		if (mdata->pad_num != master->num_chipselect) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) 			dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) 				"pad_num does not match num_chipselect(%d != %d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) 				mdata->pad_num, master->num_chipselect);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) 			ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) 			goto err_disable_runtime_pm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) 		if (!master->cs_gpios && master->num_chipselect > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) 			dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) 				"cs_gpios not specified and num_chipselect > 1\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) 			ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) 			goto err_disable_runtime_pm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) 		if (master->cs_gpios) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) 			for (i = 0; i < master->num_chipselect; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) 				ret = devm_gpio_request(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) 							master->cs_gpios[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) 							dev_name(&pdev->dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) 				if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) 					dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) 						"can't get CS GPIO %i\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) 					goto err_disable_runtime_pm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) 
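	/*
	 * Advertise the DMA addressing width: variants with the 64-bit
	 * SPI_TX_SRC_REG_64/SPI_RX_DST_REG_64 registers support the extended
	 * mask. A failure here is only logged; probing still succeeds.
	 */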
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) 	if (mdata->dev_comp->dma_ext)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) 		addr_bits = DMA_ADDR_EXT_BITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) 		addr_bits = DMA_ADDR_DEF_BITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) 	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(addr_bits));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) 		dev_notice(&pdev->dev, "SPI dma_set_mask(%d) failed, ret:%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) 			   addr_bits, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) err_disable_runtime_pm:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) 	pm_runtime_disable(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) err_put_master:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) 	spi_master_put(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) 
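/*
 * Unbind path: disable runtime PM and reset the controller; the SPI core
 * tears down the devm-registered master itself.
 */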
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) static int mtk_spi_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) 	struct spi_master *master = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) 	struct mtk_spi *mdata = spi_master_get_devdata(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) 	pm_runtime_disable(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) 	mtk_spi_reset(mdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) #ifdef CONFIG_PM_SLEEP
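/*
 * System sleep: quiesce the SPI core queue first, then gate spi_clk unless
 * runtime PM already has; resume re-enables the clock before restarting the
 * queue.
 */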
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) static int mtk_spi_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) 	struct spi_master *master = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) 	struct mtk_spi *mdata = spi_master_get_devdata(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) 	ret = spi_master_suspend(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) 	if (!pm_runtime_suspended(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) 		clk_disable_unprepare(mdata->spi_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) static int mtk_spi_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) 	struct spi_master *master = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) 	struct mtk_spi *mdata = spi_master_get_devdata(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) 	if (!pm_runtime_suspended(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) 		ret = clk_prepare_enable(mdata->spi_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) 		if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) 			dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) 	ret = spi_master_resume(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) 		clk_disable_unprepare(mdata->spi_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) #endif /* CONFIG_PM_SLEEP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) #ifdef CONFIG_PM
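/*
 * Runtime PM only gates and ungates spi_clk; the clock parenting set up in
 * probe is left untouched.
 */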
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) static int mtk_spi_runtime_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) 	struct spi_master *master = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) 	struct mtk_spi *mdata = spi_master_get_devdata(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) 	clk_disable_unprepare(mdata->spi_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) static int mtk_spi_runtime_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) 	struct spi_master *master = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) 	struct mtk_spi *mdata = spi_master_get_devdata(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) 	ret = clk_prepare_enable(mdata->spi_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) 		dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) #endif /* CONFIG_PM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) 
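/*
 * SET_SYSTEM_SLEEP_PM_OPS() and SET_RUNTIME_PM_OPS() expand to nothing when
 * CONFIG_PM_SLEEP/CONFIG_PM are disabled, so this table can be referenced
 * unconditionally below.
 */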
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) static const struct dev_pm_ops mtk_spi_pm = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) 	SET_SYSTEM_SLEEP_PM_OPS(mtk_spi_suspend, mtk_spi_resume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) 	SET_RUNTIME_PM_OPS(mtk_spi_runtime_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) 			   mtk_spi_runtime_resume, NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) static struct platform_driver mtk_spi_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) 	.driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) 		.name = "mtk-spi",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) 		.pm	= &mtk_spi_pm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) 		.of_match_table = mtk_spi_of_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) 	.probe = mtk_spi_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) 	.remove = mtk_spi_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) 
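/* module_platform_driver() emits the usual module init/exit registration boilerplate. */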
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) module_platform_driver(mtk_spi_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) MODULE_DESCRIPTION("MTK SPI Controller driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) MODULE_AUTHOR("Leilk Liu <leilk.liu@mediatek.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) MODULE_LICENSE("GPL v2");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) MODULE_ALIAS("platform:mtk-spi");
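
/*
 * Usage sketch (illustrative only, not part of this driver): a peripheral
 * driver bound to a spi_device on this bus goes through the generic SPI
 * core; the function name and command byte below are made-up examples.
 *
 *	static int example_read_id(struct spi_device *spi)
 *	{
 *		u8 cmd = 0x9f;	// hypothetical "read id" opcode
 *		u8 id[3];
 *		struct spi_transfer xfers[] = {
 *			{ .tx_buf = &cmd, .len = 1 },
 *			{ .rx_buf = id, .len = sizeof(id) },
 *		};
 *
 *		return spi_sync_transfer(spi, xfers, ARRAY_SIZE(xfers));
 *	}
 */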