^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) // Copyright (C) 2018 Spreadtrum Communications Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) #include <linux/clk.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) #include <linux/dmaengine.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) #include <linux/dma-mapping.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #include <linux/dma/sprd-dma.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/iopoll.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/of.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/of_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/of_dma.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/platform_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/pm_runtime.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/spi/spi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #define SPRD_SPI_TXD 0x0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #define SPRD_SPI_CLKD 0x4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #define SPRD_SPI_CTL0 0x8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #define SPRD_SPI_CTL1 0xc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #define SPRD_SPI_CTL2 0x10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #define SPRD_SPI_CTL3 0x14
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #define SPRD_SPI_CTL4 0x18
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #define SPRD_SPI_CTL5 0x1c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #define SPRD_SPI_INT_EN 0x20
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #define SPRD_SPI_INT_CLR 0x24
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #define SPRD_SPI_INT_RAW_STS 0x28
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #define SPRD_SPI_INT_MASK_STS 0x2c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #define SPRD_SPI_STS1 0x30
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #define SPRD_SPI_STS2 0x34
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #define SPRD_SPI_DSP_WAIT 0x38
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #define SPRD_SPI_STS3 0x3c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #define SPRD_SPI_CTL6 0x40
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #define SPRD_SPI_STS4 0x44
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #define SPRD_SPI_FIFO_RST 0x48
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #define SPRD_SPI_CTL7 0x4c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #define SPRD_SPI_STS5 0x50
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) #define SPRD_SPI_CTL8 0x54
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) #define SPRD_SPI_CTL9 0x58
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) #define SPRD_SPI_CTL10 0x5c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) #define SPRD_SPI_CTL11 0x60
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) #define SPRD_SPI_CTL12 0x64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) #define SPRD_SPI_STS6 0x68
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) #define SPRD_SPI_STS7 0x6c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) #define SPRD_SPI_STS8 0x70
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) #define SPRD_SPI_STS9 0x74
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) /* Bits & mask definition for register CTL0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) #define SPRD_SPI_SCK_REV BIT(13)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) #define SPRD_SPI_NG_TX BIT(1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) #define SPRD_SPI_NG_RX BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) #define SPRD_SPI_CHNL_LEN_MASK GENMASK(4, 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) #define SPRD_SPI_CSN_MASK GENMASK(11, 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) #define SPRD_SPI_CS0_VALID BIT(8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) /* Bits & mask definition for register SPI_INT_EN */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) #define SPRD_SPI_TX_END_INT_EN BIT(8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) #define SPRD_SPI_RX_END_INT_EN BIT(9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) /* Bits & mask definition for register SPI_INT_RAW_STS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) #define SPRD_SPI_TX_END_RAW BIT(8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) #define SPRD_SPI_RX_END_RAW BIT(9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) /* Bits & mask definition for register SPI_INT_CLR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) #define SPRD_SPI_TX_END_CLR BIT(8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) #define SPRD_SPI_RX_END_CLR BIT(9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) /* Bits & mask definition for register INT_MASK_STS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) #define SPRD_SPI_MASK_RX_END BIT(9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) #define SPRD_SPI_MASK_TX_END BIT(8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) /* Bits & mask definition for register STS2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) #define SPRD_SPI_TX_BUSY BIT(8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) /* Bits & mask definition for register CTL1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) #define SPRD_SPI_RX_MODE BIT(12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) #define SPRD_SPI_TX_MODE BIT(13)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) #define SPRD_SPI_RTX_MD_MASK GENMASK(13, 12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) /* Bits & mask definition for register CTL2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) #define SPRD_SPI_DMA_EN BIT(6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) /* Bits & mask definition for register CTL4 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) #define SPRD_SPI_START_RX BIT(9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) #define SPRD_SPI_ONLY_RECV_MASK GENMASK(8, 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) /* Bits & mask definition for register SPI_INT_CLR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) #define SPRD_SPI_RX_END_INT_CLR BIT(9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) #define SPRD_SPI_TX_END_INT_CLR BIT(8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) /* Bits & mask definition for register SPI_INT_RAW */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) #define SPRD_SPI_RX_END_IRQ BIT(9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) #define SPRD_SPI_TX_END_IRQ BIT(8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) /* Bits & mask definition for register CTL12 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) #define SPRD_SPI_SW_RX_REQ BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) #define SPRD_SPI_SW_TX_REQ BIT(1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) /* Bits & mask definition for register CTL7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) #define SPRD_SPI_DATA_LINE2_EN BIT(15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) #define SPRD_SPI_MODE_MASK GENMASK(5, 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) #define SPRD_SPI_MODE_OFFSET 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) #define SPRD_SPI_3WIRE_MODE 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) #define SPRD_SPI_4WIRE_MODE 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) /* Bits & mask definition for register CTL8 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) #define SPRD_SPI_TX_MAX_LEN_MASK GENMASK(19, 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) #define SPRD_SPI_TX_LEN_H_MASK GENMASK(3, 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) #define SPRD_SPI_TX_LEN_H_OFFSET 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) /* Bits & mask definition for register CTL9 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) #define SPRD_SPI_TX_LEN_L_MASK GENMASK(15, 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) /* Bits & mask definition for register CTL10 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) #define SPRD_SPI_RX_MAX_LEN_MASK GENMASK(19, 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) #define SPRD_SPI_RX_LEN_H_MASK GENMASK(3, 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) #define SPRD_SPI_RX_LEN_H_OFFSET 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) /* Bits & mask definition for register CTL11 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) #define SPRD_SPI_RX_LEN_L_MASK GENMASK(15, 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) /* Default & maximum word delay cycles */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) #define SPRD_SPI_MIN_DELAY_CYCLE 14
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) #define SPRD_SPI_MAX_DELAY_CYCLE 130
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) #define SPRD_SPI_FIFO_SIZE 32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) #define SPRD_SPI_CHIP_CS_NUM 0x4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) #define SPRD_SPI_CHNL_LEN 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) #define SPRD_SPI_DEFAULT_SOURCE 26000000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) #define SPRD_SPI_MAX_SPEED_HZ 48000000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) #define SPRD_SPI_AUTOSUSPEND_DELAY 100
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) #define SPRD_SPI_DMA_STEP 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) enum sprd_spi_dma_channel {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) SPRD_SPI_RX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) SPRD_SPI_TX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) SPRD_SPI_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) struct sprd_spi_dma {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) bool enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) struct dma_chan *dma_chan[SPRD_SPI_MAX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) enum dma_slave_buswidth width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) u32 fragmens_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) u32 rx_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) struct sprd_spi {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) void __iomem *base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) phys_addr_t phy_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) struct clk *clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) int irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) u32 src_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) u32 hw_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) u32 trans_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) u32 trans_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) u32 word_delay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) u32 hw_speed_hz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) u32 len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) struct sprd_spi_dma dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) struct completion xfer_completion;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) const void *tx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) void *rx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) int (*read_bufs)(struct sprd_spi *ss, u32 len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) int (*write_bufs)(struct sprd_spi *ss, u32 len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) static u32 sprd_spi_transfer_max_timeout(struct sprd_spi *ss,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) struct spi_transfer *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) * The time spent on transmission of the full FIFO data is the maximum
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) * SPI transmission time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) u32 size = t->bits_per_word * SPRD_SPI_FIFO_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) u32 bit_time_us = DIV_ROUND_UP(USEC_PER_SEC, ss->hw_speed_hz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) u32 total_time_us = size * bit_time_us;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) * There is an interval between data and the data in our SPI hardware,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) * so the total transmission time need add the interval time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) u32 interval_cycle = SPRD_SPI_FIFO_SIZE * ss->word_delay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) u32 interval_time_us = DIV_ROUND_UP(interval_cycle * USEC_PER_SEC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) ss->src_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) return total_time_us + interval_time_us;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) static int sprd_spi_wait_for_tx_end(struct sprd_spi *ss, struct spi_transfer *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) u32 val, us;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) us = sprd_spi_transfer_max_timeout(ss, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) ret = readl_relaxed_poll_timeout(ss->base + SPRD_SPI_INT_RAW_STS, val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) val & SPRD_SPI_TX_END_IRQ, 0, us);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) dev_err(ss->dev, "SPI error, spi send timeout!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) ret = readl_relaxed_poll_timeout(ss->base + SPRD_SPI_STS2, val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) !(val & SPRD_SPI_TX_BUSY), 0, us);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) dev_err(ss->dev, "SPI error, spi busy timeout!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) writel_relaxed(SPRD_SPI_TX_END_INT_CLR, ss->base + SPRD_SPI_INT_CLR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) static int sprd_spi_wait_for_rx_end(struct sprd_spi *ss, struct spi_transfer *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) u32 val, us;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) us = sprd_spi_transfer_max_timeout(ss, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) ret = readl_relaxed_poll_timeout(ss->base + SPRD_SPI_INT_RAW_STS, val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) val & SPRD_SPI_RX_END_IRQ, 0, us);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) dev_err(ss->dev, "SPI error, spi rx timeout!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) writel_relaxed(SPRD_SPI_RX_END_INT_CLR, ss->base + SPRD_SPI_INT_CLR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) static void sprd_spi_tx_req(struct sprd_spi *ss)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) writel_relaxed(SPRD_SPI_SW_TX_REQ, ss->base + SPRD_SPI_CTL12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) static void sprd_spi_rx_req(struct sprd_spi *ss)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) writel_relaxed(SPRD_SPI_SW_RX_REQ, ss->base + SPRD_SPI_CTL12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) static void sprd_spi_enter_idle(struct sprd_spi *ss)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) u32 val = readl_relaxed(ss->base + SPRD_SPI_CTL1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) val &= ~SPRD_SPI_RTX_MD_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) writel_relaxed(val, ss->base + SPRD_SPI_CTL1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) static void sprd_spi_set_transfer_bits(struct sprd_spi *ss, u32 bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) u32 val = readl_relaxed(ss->base + SPRD_SPI_CTL0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) /* Set the valid bits for every transaction */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) val &= ~(SPRD_SPI_CHNL_LEN_MASK << SPRD_SPI_CHNL_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) val |= bits << SPRD_SPI_CHNL_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) writel_relaxed(val, ss->base + SPRD_SPI_CTL0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) static void sprd_spi_set_tx_length(struct sprd_spi *ss, u32 length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) u32 val = readl_relaxed(ss->base + SPRD_SPI_CTL8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) length &= SPRD_SPI_TX_MAX_LEN_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) val &= ~SPRD_SPI_TX_LEN_H_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) val |= length >> SPRD_SPI_TX_LEN_H_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) writel_relaxed(val, ss->base + SPRD_SPI_CTL8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) val = length & SPRD_SPI_TX_LEN_L_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) writel_relaxed(val, ss->base + SPRD_SPI_CTL9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) static void sprd_spi_set_rx_length(struct sprd_spi *ss, u32 length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) u32 val = readl_relaxed(ss->base + SPRD_SPI_CTL10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) length &= SPRD_SPI_RX_MAX_LEN_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) val &= ~SPRD_SPI_RX_LEN_H_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) val |= length >> SPRD_SPI_RX_LEN_H_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) writel_relaxed(val, ss->base + SPRD_SPI_CTL10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) val = length & SPRD_SPI_RX_LEN_L_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) writel_relaxed(val, ss->base + SPRD_SPI_CTL11);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) static void sprd_spi_chipselect(struct spi_device *sdev, bool cs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) struct spi_controller *sctlr = sdev->controller;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) struct sprd_spi *ss = spi_controller_get_devdata(sctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) val = readl_relaxed(ss->base + SPRD_SPI_CTL0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) /* The SPI controller will pull down CS pin if cs is 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) if (!cs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) val &= ~SPRD_SPI_CS0_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) writel_relaxed(val, ss->base + SPRD_SPI_CTL0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) val |= SPRD_SPI_CSN_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) writel_relaxed(val, ss->base + SPRD_SPI_CTL0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) static int sprd_spi_write_only_receive(struct sprd_spi *ss, u32 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) /* Clear the start receive bit and reset receive data number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) val = readl_relaxed(ss->base + SPRD_SPI_CTL4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) val &= ~(SPRD_SPI_START_RX | SPRD_SPI_ONLY_RECV_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) writel_relaxed(val, ss->base + SPRD_SPI_CTL4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) /* Set the receive data length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) val = readl_relaxed(ss->base + SPRD_SPI_CTL4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) val |= len & SPRD_SPI_ONLY_RECV_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) writel_relaxed(val, ss->base + SPRD_SPI_CTL4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) /* Trigger to receive data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) val = readl_relaxed(ss->base + SPRD_SPI_CTL4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) val |= SPRD_SPI_START_RX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) writel_relaxed(val, ss->base + SPRD_SPI_CTL4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) static int sprd_spi_write_bufs_u8(struct sprd_spi *ss, u32 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) u8 *tx_p = (u8 *)ss->tx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) for (i = 0; i < len; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) writeb_relaxed(tx_p[i], ss->base + SPRD_SPI_TXD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) ss->tx_buf += i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) static int sprd_spi_write_bufs_u16(struct sprd_spi *ss, u32 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) u16 *tx_p = (u16 *)ss->tx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) for (i = 0; i < len; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) writew_relaxed(tx_p[i], ss->base + SPRD_SPI_TXD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) ss->tx_buf += i << 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) return i << 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) static int sprd_spi_write_bufs_u32(struct sprd_spi *ss, u32 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) u32 *tx_p = (u32 *)ss->tx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) for (i = 0; i < len; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) writel_relaxed(tx_p[i], ss->base + SPRD_SPI_TXD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) ss->tx_buf += i << 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) return i << 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) static int sprd_spi_read_bufs_u8(struct sprd_spi *ss, u32 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) u8 *rx_p = (u8 *)ss->rx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) for (i = 0; i < len; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) rx_p[i] = readb_relaxed(ss->base + SPRD_SPI_TXD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) ss->rx_buf += i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) static int sprd_spi_read_bufs_u16(struct sprd_spi *ss, u32 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) u16 *rx_p = (u16 *)ss->rx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) for (i = 0; i < len; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) rx_p[i] = readw_relaxed(ss->base + SPRD_SPI_TXD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) ss->rx_buf += i << 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) return i << 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) static int sprd_spi_read_bufs_u32(struct sprd_spi *ss, u32 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) u32 *rx_p = (u32 *)ss->rx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) for (i = 0; i < len; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) rx_p[i] = readl_relaxed(ss->base + SPRD_SPI_TXD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) ss->rx_buf += i << 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) return i << 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) static int sprd_spi_txrx_bufs(struct spi_device *sdev, struct spi_transfer *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) struct sprd_spi *ss = spi_controller_get_devdata(sdev->controller);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) u32 trans_len = ss->trans_len, len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) int ret, write_size = 0, read_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) while (trans_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) len = trans_len > SPRD_SPI_FIFO_SIZE ? SPRD_SPI_FIFO_SIZE :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) trans_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) if (ss->trans_mode & SPRD_SPI_TX_MODE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) sprd_spi_set_tx_length(ss, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) write_size += ss->write_bufs(ss, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) * For our 3 wires mode or dual TX line mode, we need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) * to request the controller to transfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) if (ss->hw_mode & SPI_3WIRE || ss->hw_mode & SPI_TX_DUAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) sprd_spi_tx_req(ss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) ret = sprd_spi_wait_for_tx_end(ss, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) sprd_spi_set_rx_length(ss, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) * For our 3 wires mode or dual TX line mode, we need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) * to request the controller to read.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) if (ss->hw_mode & SPI_3WIRE || ss->hw_mode & SPI_TX_DUAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) sprd_spi_rx_req(ss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) write_size += ss->write_bufs(ss, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) ret = sprd_spi_wait_for_rx_end(ss, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) goto complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) if (ss->trans_mode & SPRD_SPI_RX_MODE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) read_size += ss->read_bufs(ss, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) trans_len -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) if (ss->trans_mode & SPRD_SPI_TX_MODE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) ret = write_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) ret = read_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) complete:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) sprd_spi_enter_idle(ss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) static void sprd_spi_irq_enable(struct sprd_spi *ss)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) /* Clear interrupt status before enabling interrupt. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) writel_relaxed(SPRD_SPI_TX_END_CLR | SPRD_SPI_RX_END_CLR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) ss->base + SPRD_SPI_INT_CLR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) /* Enable SPI interrupt only in DMA mode. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) val = readl_relaxed(ss->base + SPRD_SPI_INT_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) writel_relaxed(val | SPRD_SPI_TX_END_INT_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) SPRD_SPI_RX_END_INT_EN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) ss->base + SPRD_SPI_INT_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) static void sprd_spi_irq_disable(struct sprd_spi *ss)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) writel_relaxed(0, ss->base + SPRD_SPI_INT_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) static void sprd_spi_dma_enable(struct sprd_spi *ss, bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) u32 val = readl_relaxed(ss->base + SPRD_SPI_CTL2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) if (enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) val |= SPRD_SPI_DMA_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) val &= ~SPRD_SPI_DMA_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) writel_relaxed(val, ss->base + SPRD_SPI_CTL2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) static int sprd_spi_dma_submit(struct dma_chan *dma_chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) struct dma_slave_config *c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) struct sg_table *sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) enum dma_transfer_direction dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) struct dma_async_tx_descriptor *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) dma_cookie_t cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) ret = dmaengine_slave_config(dma_chan, c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) flags = SPRD_DMA_FLAGS(SPRD_DMA_CHN_MODE_NONE, SPRD_DMA_NO_TRG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) SPRD_DMA_FRAG_REQ, SPRD_DMA_TRANS_INT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) desc = dmaengine_prep_slave_sg(dma_chan, sg->sgl, sg->nents, dir, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) if (!desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) cookie = dmaengine_submit(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) if (dma_submit_error(cookie))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) return dma_submit_error(cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) dma_async_issue_pending(dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) static int sprd_spi_dma_rx_config(struct sprd_spi *ss, struct spi_transfer *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) struct dma_chan *dma_chan = ss->dma.dma_chan[SPRD_SPI_RX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) struct dma_slave_config config = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) .src_addr = ss->phy_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) .src_addr_width = ss->dma.width,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) .dst_addr_width = ss->dma.width,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) .dst_maxburst = ss->dma.fragmens_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) ret = sprd_spi_dma_submit(dma_chan, &config, &t->rx_sg, DMA_DEV_TO_MEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) return ss->dma.rx_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) static int sprd_spi_dma_tx_config(struct sprd_spi *ss, struct spi_transfer *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) struct dma_chan *dma_chan = ss->dma.dma_chan[SPRD_SPI_TX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) struct dma_slave_config config = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) .dst_addr = ss->phy_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) .src_addr_width = ss->dma.width,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) .dst_addr_width = ss->dma.width,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) .src_maxburst = ss->dma.fragmens_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) ret = sprd_spi_dma_submit(dma_chan, &config, &t->tx_sg, DMA_MEM_TO_DEV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) return t->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) static int sprd_spi_dma_request(struct sprd_spi *ss)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) ss->dma.dma_chan[SPRD_SPI_RX] = dma_request_chan(ss->dev, "rx_chn");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) if (IS_ERR_OR_NULL(ss->dma.dma_chan[SPRD_SPI_RX]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) return dev_err_probe(ss->dev, PTR_ERR(ss->dma.dma_chan[SPRD_SPI_RX]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) "request RX DMA channel failed!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) ss->dma.dma_chan[SPRD_SPI_TX] = dma_request_chan(ss->dev, "tx_chn");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) if (IS_ERR_OR_NULL(ss->dma.dma_chan[SPRD_SPI_TX])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) dma_release_channel(ss->dma.dma_chan[SPRD_SPI_RX]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) return dev_err_probe(ss->dev, PTR_ERR(ss->dma.dma_chan[SPRD_SPI_TX]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) "request TX DMA channel failed!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) static void sprd_spi_dma_release(struct sprd_spi *ss)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) if (ss->dma.dma_chan[SPRD_SPI_RX])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) dma_release_channel(ss->dma.dma_chan[SPRD_SPI_RX]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) if (ss->dma.dma_chan[SPRD_SPI_TX])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) dma_release_channel(ss->dma.dma_chan[SPRD_SPI_TX]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) static int sprd_spi_dma_txrx_bufs(struct spi_device *sdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) struct spi_transfer *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) struct sprd_spi *ss = spi_master_get_devdata(sdev->master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) u32 trans_len = ss->trans_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) int ret, write_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) reinit_completion(&ss->xfer_completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) sprd_spi_irq_enable(ss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) if (ss->trans_mode & SPRD_SPI_TX_MODE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) write_size = sprd_spi_dma_tx_config(ss, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) sprd_spi_set_tx_length(ss, trans_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) * For our 3 wires mode or dual TX line mode, we need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) * to request the controller to transfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) if (ss->hw_mode & SPI_3WIRE || ss->hw_mode & SPI_TX_DUAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) sprd_spi_tx_req(ss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) sprd_spi_set_rx_length(ss, trans_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) * For our 3 wires mode or dual TX line mode, we need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) * to request the controller to read.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) if (ss->hw_mode & SPI_3WIRE || ss->hw_mode & SPI_TX_DUAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) sprd_spi_rx_req(ss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) write_size = ss->write_bufs(ss, trans_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) if (write_size < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) ret = write_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) dev_err(ss->dev, "failed to write, ret = %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) goto trans_complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) if (ss->trans_mode & SPRD_SPI_RX_MODE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) * Set up the DMA receive data length, which must be an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) * integral multiple of fragment length. But when the length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) * of received data is less than fragment length, DMA can be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) * configured to receive data according to the actual length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) * of received data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) ss->dma.rx_len = t->len > ss->dma.fragmens_len ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) (t->len - t->len % ss->dma.fragmens_len) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) t->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) ret = sprd_spi_dma_rx_config(ss, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) dev_err(&sdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) "failed to configure rx DMA, ret = %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) goto trans_complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) sprd_spi_dma_enable(ss, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) wait_for_completion(&(ss->xfer_completion));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) if (ss->trans_mode & SPRD_SPI_TX_MODE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) ret = write_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) ret = ss->dma.rx_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) trans_complete:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) sprd_spi_dma_enable(ss, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) sprd_spi_enter_idle(ss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) sprd_spi_irq_disable(ss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) static void sprd_spi_set_speed(struct sprd_spi *ss, u32 speed_hz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) * From SPI datasheet, the prescale calculation formula:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) * prescale = SPI source clock / (2 * SPI_freq) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) u32 clk_div = DIV_ROUND_UP(ss->src_clk, speed_hz << 1) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) /* Save the real hardware speed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) ss->hw_speed_hz = (ss->src_clk >> 1) / (clk_div + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) writel_relaxed(clk_div, ss->base + SPRD_SPI_CLKD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) static int sprd_spi_init_hw(struct sprd_spi *ss, struct spi_transfer *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) struct spi_delay *d = &t->word_delay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) u16 word_delay, interval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) if (d->unit != SPI_DELAY_UNIT_SCK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) val = readl_relaxed(ss->base + SPRD_SPI_CTL0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) val &= ~(SPRD_SPI_SCK_REV | SPRD_SPI_NG_TX | SPRD_SPI_NG_RX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) /* Set default chip selection, clock phase and clock polarity */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) val |= ss->hw_mode & SPI_CPHA ? SPRD_SPI_NG_RX : SPRD_SPI_NG_TX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) val |= ss->hw_mode & SPI_CPOL ? SPRD_SPI_SCK_REV : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) writel_relaxed(val, ss->base + SPRD_SPI_CTL0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) * Set the intervals of two SPI frames, and the inteval calculation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) * formula as below per datasheet:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) * interval time (source clock cycles) = interval * 4 + 10.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) word_delay = clamp_t(u16, d->value, SPRD_SPI_MIN_DELAY_CYCLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) SPRD_SPI_MAX_DELAY_CYCLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) interval = DIV_ROUND_UP(word_delay - 10, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) ss->word_delay = interval * 4 + 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) writel_relaxed(interval, ss->base + SPRD_SPI_CTL5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) /* Reset SPI fifo */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) writel_relaxed(1, ss->base + SPRD_SPI_FIFO_RST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) writel_relaxed(0, ss->base + SPRD_SPI_FIFO_RST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) /* Set SPI work mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) val = readl_relaxed(ss->base + SPRD_SPI_CTL7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) val &= ~SPRD_SPI_MODE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) if (ss->hw_mode & SPI_3WIRE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) val |= SPRD_SPI_3WIRE_MODE << SPRD_SPI_MODE_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) val |= SPRD_SPI_4WIRE_MODE << SPRD_SPI_MODE_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) if (ss->hw_mode & SPI_TX_DUAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) val |= SPRD_SPI_DATA_LINE2_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) val &= ~SPRD_SPI_DATA_LINE2_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) writel_relaxed(val, ss->base + SPRD_SPI_CTL7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) static int sprd_spi_setup_transfer(struct spi_device *sdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) struct spi_transfer *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) struct sprd_spi *ss = spi_controller_get_devdata(sdev->controller);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) u8 bits_per_word = t->bits_per_word;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) u32 val, mode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) ss->len = t->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) ss->tx_buf = t->tx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) ss->rx_buf = t->rx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) ss->hw_mode = sdev->mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) ret = sprd_spi_init_hw(ss, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) /* Set tansfer speed and valid bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) sprd_spi_set_speed(ss, t->speed_hz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) sprd_spi_set_transfer_bits(ss, bits_per_word);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) if (bits_per_word > 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) bits_per_word = round_up(bits_per_word, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) bits_per_word = round_up(bits_per_word, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) switch (bits_per_word) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) case 8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) ss->trans_len = t->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) ss->read_bufs = sprd_spi_read_bufs_u8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) ss->write_bufs = sprd_spi_write_bufs_u8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) ss->dma.width = DMA_SLAVE_BUSWIDTH_1_BYTE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) ss->dma.fragmens_len = SPRD_SPI_DMA_STEP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) case 16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) ss->trans_len = t->len >> 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) ss->read_bufs = sprd_spi_read_bufs_u16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) ss->write_bufs = sprd_spi_write_bufs_u16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) ss->dma.width = DMA_SLAVE_BUSWIDTH_2_BYTES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) ss->dma.fragmens_len = SPRD_SPI_DMA_STEP << 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) case 32:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) ss->trans_len = t->len >> 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) ss->read_bufs = sprd_spi_read_bufs_u32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) ss->write_bufs = sprd_spi_write_bufs_u32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) ss->dma.width = DMA_SLAVE_BUSWIDTH_4_BYTES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) ss->dma.fragmens_len = SPRD_SPI_DMA_STEP << 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) /* Set transfer read or write mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) val = readl_relaxed(ss->base + SPRD_SPI_CTL1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) val &= ~SPRD_SPI_RTX_MD_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) if (t->tx_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) mode |= SPRD_SPI_TX_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) if (t->rx_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) mode |= SPRD_SPI_RX_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) writel_relaxed(val | mode, ss->base + SPRD_SPI_CTL1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) ss->trans_mode = mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) * If in only receive mode, we need to trigger the SPI controller to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) * receive data automatically.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) if (ss->trans_mode == SPRD_SPI_RX_MODE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) ss->write_bufs = sprd_spi_write_only_receive;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) static int sprd_spi_transfer_one(struct spi_controller *sctlr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) struct spi_device *sdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) struct spi_transfer *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) ret = sprd_spi_setup_transfer(sdev, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) goto setup_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) if (sctlr->can_dma(sctlr, sdev, t))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) ret = sprd_spi_dma_txrx_bufs(sdev, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) ret = sprd_spi_txrx_bufs(sdev, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) if (ret == t->len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) else if (ret >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) ret = -EREMOTEIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) setup_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) spi_finalize_current_transfer(sctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) static irqreturn_t sprd_spi_handle_irq(int irq, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) struct sprd_spi *ss = (struct sprd_spi *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) u32 val = readl_relaxed(ss->base + SPRD_SPI_INT_MASK_STS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) if (val & SPRD_SPI_MASK_TX_END) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) writel_relaxed(SPRD_SPI_TX_END_CLR, ss->base + SPRD_SPI_INT_CLR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) if (!(ss->trans_mode & SPRD_SPI_RX_MODE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) complete(&ss->xfer_completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) if (val & SPRD_SPI_MASK_RX_END) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) writel_relaxed(SPRD_SPI_RX_END_CLR, ss->base + SPRD_SPI_INT_CLR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) if (ss->dma.rx_len < ss->len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) ss->rx_buf += ss->dma.rx_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) ss->dma.rx_len +=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) ss->read_bufs(ss, ss->len - ss->dma.rx_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) complete(&ss->xfer_completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) static int sprd_spi_irq_init(struct platform_device *pdev, struct sprd_spi *ss)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) ss->irq = platform_get_irq(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) if (ss->irq < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) return ss->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) ret = devm_request_irq(&pdev->dev, ss->irq, sprd_spi_handle_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) 0, pdev->name, ss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) dev_err(&pdev->dev, "failed to request spi irq %d, ret = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) ss->irq, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) static int sprd_spi_clk_init(struct platform_device *pdev, struct sprd_spi *ss)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) struct clk *clk_spi, *clk_parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) clk_spi = devm_clk_get(&pdev->dev, "spi");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) if (IS_ERR(clk_spi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) dev_warn(&pdev->dev, "can't get the spi clock\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) clk_spi = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) clk_parent = devm_clk_get(&pdev->dev, "source");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) if (IS_ERR(clk_parent)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) dev_warn(&pdev->dev, "can't get the source clock\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) clk_parent = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) ss->clk = devm_clk_get(&pdev->dev, "enable");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) if (IS_ERR(ss->clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) dev_err(&pdev->dev, "can't get the enable clock\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) return PTR_ERR(ss->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) if (!clk_set_parent(clk_spi, clk_parent))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) ss->src_clk = clk_get_rate(clk_spi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) ss->src_clk = SPRD_SPI_DEFAULT_SOURCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) static bool sprd_spi_can_dma(struct spi_controller *sctlr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) struct spi_device *spi, struct spi_transfer *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) struct sprd_spi *ss = spi_controller_get_devdata(sctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) return ss->dma.enable && (t->len > SPRD_SPI_FIFO_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) static int sprd_spi_dma_init(struct platform_device *pdev, struct sprd_spi *ss)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) ret = sprd_spi_dma_request(ss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) if (ret == -EPROBE_DEFER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) dev_warn(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) "failed to request dma, enter no dma mode, ret = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) ss->dma.enable = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) static int sprd_spi_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) struct spi_controller *sctlr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) struct sprd_spi *ss;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) pdev->id = of_alias_get_id(pdev->dev.of_node, "spi");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) sctlr = spi_alloc_master(&pdev->dev, sizeof(*ss));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) if (!sctlr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) ss = spi_controller_get_devdata(sctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) ss->base = devm_ioremap_resource(&pdev->dev, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) if (IS_ERR(ss->base)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) ret = PTR_ERR(ss->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) goto free_controller;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) ss->phy_base = res->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) ss->dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) sctlr->dev.of_node = pdev->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) sctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_3WIRE | SPI_TX_DUAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) sctlr->bus_num = pdev->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) sctlr->set_cs = sprd_spi_chipselect;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) sctlr->transfer_one = sprd_spi_transfer_one;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) sctlr->can_dma = sprd_spi_can_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) sctlr->auto_runtime_pm = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) sctlr->max_speed_hz = min_t(u32, ss->src_clk >> 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) SPRD_SPI_MAX_SPEED_HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) init_completion(&ss->xfer_completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) platform_set_drvdata(pdev, sctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) ret = sprd_spi_clk_init(pdev, ss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) goto free_controller;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) ret = sprd_spi_irq_init(pdev, ss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) goto free_controller;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) ret = sprd_spi_dma_init(pdev, ss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) goto free_controller;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) ret = clk_prepare_enable(ss->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) goto release_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) ret = pm_runtime_set_active(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) goto disable_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) pm_runtime_set_autosuspend_delay(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) SPRD_SPI_AUTOSUSPEND_DELAY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) pm_runtime_use_autosuspend(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) pm_runtime_enable(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) ret = pm_runtime_get_sync(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) dev_err(&pdev->dev, "failed to resume SPI controller\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) goto err_rpm_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) ret = devm_spi_register_controller(&pdev->dev, sctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) goto err_rpm_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) pm_runtime_mark_last_busy(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) pm_runtime_put_autosuspend(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) err_rpm_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) pm_runtime_put_noidle(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) pm_runtime_disable(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) disable_clk:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) clk_disable_unprepare(ss->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) release_dma:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) sprd_spi_dma_release(ss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) free_controller:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) spi_controller_put(sctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) static int sprd_spi_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) struct spi_controller *sctlr = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) struct sprd_spi *ss = spi_controller_get_devdata(sctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) ret = pm_runtime_get_sync(ss->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) pm_runtime_put_noidle(ss->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) dev_err(ss->dev, "failed to resume SPI controller\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) spi_controller_suspend(sctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) if (ss->dma.enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) sprd_spi_dma_release(ss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) clk_disable_unprepare(ss->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) pm_runtime_put_noidle(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) pm_runtime_disable(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) static int __maybe_unused sprd_spi_runtime_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) struct spi_controller *sctlr = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) struct sprd_spi *ss = spi_controller_get_devdata(sctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) if (ss->dma.enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) sprd_spi_dma_release(ss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) clk_disable_unprepare(ss->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) static int __maybe_unused sprd_spi_runtime_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) struct spi_controller *sctlr = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) struct sprd_spi *ss = spi_controller_get_devdata(sctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) ret = clk_prepare_enable(ss->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) if (!ss->dma.enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) ret = sprd_spi_dma_request(ss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) clk_disable_unprepare(ss->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
/* Runtime PM only; no runtime-idle callback is provided. */
static const struct dev_pm_ops sprd_spi_pm_ops = {
	SET_RUNTIME_PM_OPS(sprd_spi_runtime_suspend,
			   sprd_spi_runtime_resume, NULL)
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
/* Devicetree match table; also exported for module autoloading. */
static const struct of_device_id sprd_spi_of_match[] = {
	{ .compatible = "sprd,sc9860-spi", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sprd_spi_of_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)
/* Platform driver glue: binds via the OF table above, with runtime PM ops. */
static struct platform_driver sprd_spi_driver = {
	.driver = {
		.name = "sprd-spi",
		.of_match_table = sprd_spi_of_match,
		.pm = &sprd_spi_pm_ops,
	},
	.probe = sprd_spi_probe,
	.remove = sprd_spi_remove,
};

module_platform_driver(sprd_spi_driver);

MODULE_DESCRIPTION("Spreadtrum SPI Controller driver");
MODULE_AUTHOR("Lanqing Liu <lanqing.liu@spreadtrum.com>");
MODULE_LICENSE("GPL v2");