^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) // spi-uniphier.c - Socionext UniPhier SPI controller driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) // Copyright 2012 Panasonic Corporation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) // Copyright 2016-2018 Socionext Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #include <linux/bitfield.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <linux/bitops.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/clk.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/dmaengine.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/platform_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/spi/spi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <asm/unaligned.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #define SSI_TIMEOUT_MS 2000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #define SSI_POLL_TIMEOUT_US 200
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #define SSI_MAX_CLK_DIVIDER 254
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #define SSI_MIN_CLK_DIVIDER 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) struct uniphier_spi_priv {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) void __iomem *base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) dma_addr_t base_dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) struct clk *clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) struct spi_master *master;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) struct completion xfer_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) unsigned int tx_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) unsigned int rx_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) const u8 *tx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) u8 *rx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) atomic_t dma_busy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) bool is_save_param;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) u8 bits_per_word;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) u16 mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) u32 speed_hz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) #define SSI_CTL 0x00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) #define SSI_CTL_EN BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) #define SSI_CKS 0x04
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) #define SSI_CKS_CKRAT_MASK GENMASK(7, 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) #define SSI_CKS_CKPHS BIT(14)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) #define SSI_CKS_CKINIT BIT(13)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) #define SSI_CKS_CKDLY BIT(12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) #define SSI_TXWDS 0x08
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) #define SSI_TXWDS_WDLEN_MASK GENMASK(13, 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) #define SSI_TXWDS_TDTF_MASK GENMASK(7, 6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) #define SSI_TXWDS_DTLEN_MASK GENMASK(5, 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) #define SSI_RXWDS 0x0c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) #define SSI_RXWDS_DTLEN_MASK GENMASK(5, 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) #define SSI_FPS 0x10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) #define SSI_FPS_FSPOL BIT(15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) #define SSI_FPS_FSTRT BIT(14)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) #define SSI_SR 0x14
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) #define SSI_SR_BUSY BIT(7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) #define SSI_SR_RNE BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) #define SSI_IE 0x18
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) #define SSI_IE_TCIE BIT(4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) #define SSI_IE_RCIE BIT(3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) #define SSI_IE_TXRE BIT(2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) #define SSI_IE_RXRE BIT(1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) #define SSI_IE_RORIE BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) #define SSI_IE_ALL_MASK GENMASK(4, 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) #define SSI_IS 0x1c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) #define SSI_IS_RXRS BIT(9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) #define SSI_IS_RCID BIT(3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) #define SSI_IS_RORID BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) #define SSI_IC 0x1c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) #define SSI_IC_TCIC BIT(4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) #define SSI_IC_RCIC BIT(3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) #define SSI_IC_RORIC BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) #define SSI_FC 0x20
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) #define SSI_FC_TXFFL BIT(12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) #define SSI_FC_TXFTH_MASK GENMASK(11, 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) #define SSI_FC_RXFFL BIT(4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) #define SSI_FC_RXFTH_MASK GENMASK(3, 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) #define SSI_TXDR 0x24
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) #define SSI_RXDR 0x24
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) #define SSI_FIFO_DEPTH 8U
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) #define SSI_FIFO_BURST_NUM 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) #define SSI_DMA_RX_BUSY BIT(1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) #define SSI_DMA_TX_BUSY BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102)
/*
 * Number of bytes needed to carry one word of @bits bits on the bus:
 * 1 byte up to 8 bits, 2 bytes up to 16 bits, 4 bytes otherwise.
 */
static inline unsigned int bytes_per_word(unsigned int bits)
{
	if (bits <= 8)
		return 1;
	if (bits <= 16)
		return 2;
	return 4;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) static inline void uniphier_spi_irq_enable(struct uniphier_spi_priv *priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) u32 mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) val = readl(priv->base + SSI_IE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) val |= mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) writel(val, priv->base + SSI_IE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) static inline void uniphier_spi_irq_disable(struct uniphier_spi_priv *priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) u32 mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) val = readl(priv->base + SSI_IE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) val &= ~mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) writel(val, priv->base + SSI_IE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127)
/*
 * Program clock polarity/phase, frame polarity and bit order from
 * spi->mode into the SSI_CKS, SSI_FPS and SSI_TX/RXWDS registers.
 * Called whenever the cached mode differs from the requested one.
 */
static void uniphier_spi_set_mode(struct spi_device *spi)
{
	struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
	u32 val1, val2;

	/*
	 * clock setting
	 * CKPHS capture timing. 0:rising edge, 1:falling edge
	 * CKINIT clock initial level. 0:low, 1:high
	 * CKDLY clock delay. 0:no delay, 1:delay depending on FSTRT
	 * (FSTRT=0: 1 clock, FSTRT=1: 0.5 clock)
	 *
	 * frame setting
	 * FSPOL frame signal polarity. 0: low, 1: high
	 * FSTRT start frame timing
	 * 0: rising edge of clock, 1: falling edge of clock
	 */
	/* the 2-bit CPOL|CPHA field makes this switch exhaustive */
	switch (spi->mode & (SPI_CPOL | SPI_CPHA)) {
	case SPI_MODE_0:
		/* CKPHS=1, CKINIT=0, CKDLY=1, FSTRT=0 */
		val1 = SSI_CKS_CKPHS | SSI_CKS_CKDLY;
		val2 = 0;
		break;
	case SPI_MODE_1:
		/* CKPHS=0, CKINIT=0, CKDLY=0, FSTRT=1 */
		val1 = 0;
		val2 = SSI_FPS_FSTRT;
		break;
	case SPI_MODE_2:
		/* CKPHS=0, CKINIT=1, CKDLY=1, FSTRT=1 */
		val1 = SSI_CKS_CKINIT | SSI_CKS_CKDLY;
		val2 = SSI_FPS_FSTRT;
		break;
	case SPI_MODE_3:
		/* CKPHS=1, CKINIT=1, CKDLY=0, FSTRT=0 */
		val1 = SSI_CKS_CKPHS | SSI_CKS_CKINIT;
		val2 = 0;
		break;
	}

	/* default (active-low) chip select sets FSPOL; SPI_CS_HIGH clears it */
	if (!(spi->mode & SPI_CS_HIGH))
		val2 |= SSI_FPS_FSPOL;

	writel(val1, priv->base + SSI_CKS);
	writel(val2, priv->base + SSI_FPS);

	val1 = 0;
	if (spi->mode & SPI_LSB_FIRST)
		val1 |= FIELD_PREP(SSI_TXWDS_TDTF_MASK, 1);
	/*
	 * This also zeroes the WDLEN/DTLEN fields; they are reprogrammed
	 * by uniphier_spi_set_transfer_size() before the transfer starts.
	 */
	writel(val1, priv->base + SSI_TXWDS);
	writel(val1, priv->base + SSI_RXWDS);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) static void uniphier_spi_set_transfer_size(struct spi_device *spi, int size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) val = readl(priv->base + SSI_TXWDS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) val &= ~(SSI_TXWDS_WDLEN_MASK | SSI_TXWDS_DTLEN_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) val |= FIELD_PREP(SSI_TXWDS_WDLEN_MASK, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) val |= FIELD_PREP(SSI_TXWDS_DTLEN_MASK, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) writel(val, priv->base + SSI_TXWDS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) val = readl(priv->base + SSI_RXWDS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) val &= ~SSI_RXWDS_DTLEN_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) val |= FIELD_PREP(SSI_RXWDS_DTLEN_MASK, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) writel(val, priv->base + SSI_RXWDS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) static void uniphier_spi_set_baudrate(struct spi_device *spi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) unsigned int speed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) u32 val, ckdiv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) * the supported rates are even numbers from 4 to 254. (4,6,8...254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) * round up as we look for equal or less speed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) ckdiv = DIV_ROUND_UP(clk_get_rate(priv->clk), speed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) ckdiv = round_up(ckdiv, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) val = readl(priv->base + SSI_CKS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) val &= ~SSI_CKS_CKRAT_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) val |= ckdiv & SSI_CKS_CKRAT_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) writel(val, priv->base + SSI_CKS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) static void uniphier_spi_setup_transfer(struct spi_device *spi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) struct spi_transfer *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) priv->error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) priv->tx_buf = t->tx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) priv->rx_buf = t->rx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) priv->tx_bytes = priv->rx_bytes = t->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) if (!priv->is_save_param || priv->mode != spi->mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) uniphier_spi_set_mode(spi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) priv->mode = spi->mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) priv->is_save_param = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) if (!priv->is_save_param || priv->bits_per_word != t->bits_per_word) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) uniphier_spi_set_transfer_size(spi, t->bits_per_word);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) priv->bits_per_word = t->bits_per_word;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) if (!priv->is_save_param || priv->speed_hz != t->speed_hz) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) uniphier_spi_set_baudrate(spi, t->speed_hz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) priv->speed_hz = t->speed_hz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) priv->is_save_param = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) /* reset FIFOs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) val = SSI_FC_TXFFL | SSI_FC_RXFFL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) writel(val, priv->base + SSI_FC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) static void uniphier_spi_send(struct uniphier_spi_priv *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) int wsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) u32 val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) wsize = min(bytes_per_word(priv->bits_per_word), priv->tx_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) priv->tx_bytes -= wsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) if (priv->tx_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) switch (wsize) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) val = *priv->tx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) val = get_unaligned_le16(priv->tx_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) val = get_unaligned_le32(priv->tx_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) priv->tx_buf += wsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) writel(val, priv->base + SSI_TXDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) static void uniphier_spi_recv(struct uniphier_spi_priv *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) int rsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) rsize = min(bytes_per_word(priv->bits_per_word), priv->rx_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) priv->rx_bytes -= rsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) val = readl(priv->base + SSI_RXDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) if (priv->rx_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) switch (rsize) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) *priv->rx_buf = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) put_unaligned_le16(val, priv->rx_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) put_unaligned_le32(val, priv->rx_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) priv->rx_buf += rsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) static void uniphier_spi_set_fifo_threshold(struct uniphier_spi_priv *priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) unsigned int threshold)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) val = readl(priv->base + SSI_FC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) val &= ~(SSI_FC_TXFTH_MASK | SSI_FC_RXFTH_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) val |= FIELD_PREP(SSI_FC_TXFTH_MASK, SSI_FIFO_DEPTH - threshold);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) val |= FIELD_PREP(SSI_FC_RXFTH_MASK, threshold);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) writel(val, priv->base + SSI_FC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) static void uniphier_spi_fill_tx_fifo(struct uniphier_spi_priv *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) unsigned int fifo_threshold, fill_words;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) unsigned int bpw = bytes_per_word(priv->bits_per_word);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) fifo_threshold = DIV_ROUND_UP(priv->rx_bytes, bpw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) fifo_threshold = min(fifo_threshold, SSI_FIFO_DEPTH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) uniphier_spi_set_fifo_threshold(priv, fifo_threshold);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) fill_words = fifo_threshold -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) DIV_ROUND_UP(priv->rx_bytes - priv->tx_bytes, bpw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) while (fill_words--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) uniphier_spi_send(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) static void uniphier_spi_set_cs(struct spi_device *spi, bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) val = readl(priv->base + SSI_FPS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) if (enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) val |= SSI_FPS_FSPOL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) val &= ~SSI_FPS_FSPOL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) writel(val, priv->base + SSI_FPS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) static bool uniphier_spi_can_dma(struct spi_master *master,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) struct spi_device *spi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) struct spi_transfer *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) unsigned int bpw = bytes_per_word(priv->bits_per_word);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) if ((!master->dma_tx && !master->dma_rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) || (!master->dma_tx && t->tx_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) || (!master->dma_rx && t->rx_buf))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) return DIV_ROUND_UP(t->len, bpw) > SSI_FIFO_DEPTH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) static void uniphier_spi_dma_rxcb(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) struct spi_master *master = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) int state = atomic_fetch_andnot(SSI_DMA_RX_BUSY, &priv->dma_busy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) uniphier_spi_irq_disable(priv, SSI_IE_RXRE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) if (!(state & SSI_DMA_TX_BUSY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) spi_finalize_current_transfer(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) static void uniphier_spi_dma_txcb(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) struct spi_master *master = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) int state = atomic_fetch_andnot(SSI_DMA_TX_BUSY, &priv->dma_busy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) uniphier_spi_irq_disable(priv, SSI_IE_TXRE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) if (!(state & SSI_DMA_RX_BUSY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) spi_finalize_current_transfer(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387)
/*
 * Run one transfer via the DMA engine.
 *
 * Configures and submits a slave-sg descriptor per direction actually
 * used by the transfer; completion is signalled from the per-channel
 * callbacks, which finalize the transfer once both busy bits are clear.
 *
 * Returns a positive value when at least one descriptor was queued
 * (the SPI core then waits for spi_finalize_current_transfer()), 0 for
 * an empty transfer, or -EINVAL if descriptor preparation failed.
 */
static int uniphier_spi_transfer_one_dma(struct spi_master *master,
					 struct spi_device *spi,
					 struct spi_transfer *t)
{
	struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
	struct dma_async_tx_descriptor *rxdesc = NULL, *txdesc = NULL;
	int buswidth;

	/* no direction is in flight yet */
	atomic_set(&priv->dma_busy, 0);

	uniphier_spi_set_fifo_threshold(priv, SSI_FIFO_BURST_NUM);

	/* match the DMA access width to the configured word size */
	if (priv->bits_per_word <= 8)
		buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
	else if (priv->bits_per_word <= 16)
		buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
	else
		buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;

	/* RX first, so data can be drained as soon as TX starts pushing */
	if (priv->rx_buf) {
		struct dma_slave_config rxconf = {
			.direction = DMA_DEV_TO_MEM,
			.src_addr = priv->base_dma_addr + SSI_RXDR,
			.src_addr_width = buswidth,
			.src_maxburst = SSI_FIFO_BURST_NUM,
		};

		dmaengine_slave_config(master->dma_rx, &rxconf);

		rxdesc = dmaengine_prep_slave_sg(
			master->dma_rx,
			t->rx_sg.sgl, t->rx_sg.nents,
			DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!rxdesc)
			goto out_err_prep;

		rxdesc->callback = uniphier_spi_dma_rxcb;
		rxdesc->callback_param = master;

		/* mark RX busy before submit so the callback sees it set */
		uniphier_spi_irq_enable(priv, SSI_IE_RXRE);
		atomic_or(SSI_DMA_RX_BUSY, &priv->dma_busy);

		dmaengine_submit(rxdesc);
		dma_async_issue_pending(master->dma_rx);
	}

	if (priv->tx_buf) {
		struct dma_slave_config txconf = {
			.direction = DMA_MEM_TO_DEV,
			.dst_addr = priv->base_dma_addr + SSI_TXDR,
			.dst_addr_width = buswidth,
			.dst_maxburst = SSI_FIFO_BURST_NUM,
		};

		dmaengine_slave_config(master->dma_tx, &txconf);

		txdesc = dmaengine_prep_slave_sg(
			master->dma_tx,
			t->tx_sg.sgl, t->tx_sg.nents,
			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!txdesc)
			goto out_err_prep;

		txdesc->callback = uniphier_spi_dma_txcb;
		txdesc->callback_param = master;

		/* mark TX busy before submit so the callback sees it set */
		uniphier_spi_irq_enable(priv, SSI_IE_TXRE);
		atomic_or(SSI_DMA_TX_BUSY, &priv->dma_busy);

		dmaengine_submit(txdesc);
		dma_async_issue_pending(master->dma_tx);
	}

	/* signal that we need to wait for completion */
	return (priv->tx_buf || priv->rx_buf);

out_err_prep:
	/* TX prep failed after RX was already submitted: stop the RX side */
	if (rxdesc)
		dmaengine_terminate_sync(master->dma_rx);

	return -EINVAL;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) static int uniphier_spi_transfer_one_irq(struct spi_master *master,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) struct spi_device *spi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) struct spi_transfer *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) struct device *dev = master->dev.parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) unsigned long time_left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) reinit_completion(&priv->xfer_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) uniphier_spi_fill_tx_fifo(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) uniphier_spi_irq_enable(priv, SSI_IE_RCIE | SSI_IE_RORIE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) time_left = wait_for_completion_timeout(&priv->xfer_done,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) msecs_to_jiffies(SSI_TIMEOUT_MS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) uniphier_spi_irq_disable(priv, SSI_IE_RCIE | SSI_IE_RORIE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) if (!time_left) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) dev_err(dev, "transfer timeout.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) return priv->error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) static int uniphier_spi_transfer_one_poll(struct spi_master *master,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) struct spi_device *spi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) struct spi_transfer *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) int loop = SSI_POLL_TIMEOUT_US * 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) while (priv->tx_bytes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) uniphier_spi_fill_tx_fifo(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) while ((priv->rx_bytes - priv->tx_bytes) > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) while (!(readl(priv->base + SSI_SR) & SSI_SR_RNE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) && loop--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) ndelay(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) if (loop == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) goto irq_transfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) uniphier_spi_recv(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) irq_transfer:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) return uniphier_spi_transfer_one_irq(master, spi, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) static int uniphier_spi_transfer_one(struct spi_master *master,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) struct spi_device *spi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) struct spi_transfer *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) unsigned long threshold;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) bool use_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) /* Terminate and return success for 0 byte length transfer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) if (!t->len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) uniphier_spi_setup_transfer(spi, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) use_dma = master->can_dma ? master->can_dma(master, spi, t) : false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) if (use_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) return uniphier_spi_transfer_one_dma(master, spi, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) * If the transfer operation will take longer than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) * SSI_POLL_TIMEOUT_US, it should use irq.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) threshold = DIV_ROUND_UP(SSI_POLL_TIMEOUT_US * priv->speed_hz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) USEC_PER_SEC * BITS_PER_BYTE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) if (t->len > threshold)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) return uniphier_spi_transfer_one_irq(master, spi, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) return uniphier_spi_transfer_one_poll(master, spi, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) static int uniphier_spi_prepare_transfer_hardware(struct spi_master *master)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) writel(SSI_CTL_EN, priv->base + SSI_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) static int uniphier_spi_unprepare_transfer_hardware(struct spi_master *master)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) writel(0, priv->base + SSI_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) static void uniphier_spi_handle_err(struct spi_master *master,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) struct spi_message *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) /* stop running spi transfer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) writel(0, priv->base + SSI_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) /* reset FIFOs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) val = SSI_FC_TXFFL | SSI_FC_RXFFL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) writel(val, priv->base + SSI_FC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) uniphier_spi_irq_disable(priv, SSI_IE_ALL_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) if (atomic_read(&priv->dma_busy) & SSI_DMA_TX_BUSY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) dmaengine_terminate_async(master->dma_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) atomic_andnot(SSI_DMA_TX_BUSY, &priv->dma_busy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) if (atomic_read(&priv->dma_busy) & SSI_DMA_RX_BUSY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) dmaengine_terminate_async(master->dma_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) atomic_andnot(SSI_DMA_RX_BUSY, &priv->dma_busy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) static irqreturn_t uniphier_spi_handler(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) struct uniphier_spi_priv *priv = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) u32 val, stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) stat = readl(priv->base + SSI_IS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) val = SSI_IC_TCIC | SSI_IC_RCIC | SSI_IC_RORIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) writel(val, priv->base + SSI_IC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) /* rx fifo overrun */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) if (stat & SSI_IS_RORID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) priv->error = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) /* rx complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) if ((stat & SSI_IS_RCID) && (stat & SSI_IS_RXRS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) while ((readl(priv->base + SSI_SR) & SSI_SR_RNE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) (priv->rx_bytes - priv->tx_bytes) > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) uniphier_spi_recv(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) if ((readl(priv->base + SSI_SR) & SSI_SR_RNE) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) (priv->rx_bytes != priv->tx_bytes)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) priv->error = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) } else if (priv->rx_bytes == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) /* next tx transfer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) uniphier_spi_fill_tx_fifo(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) complete(&priv->xfer_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) static int uniphier_spi_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) struct uniphier_spi_priv *priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) struct spi_master *master;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) struct dma_slave_caps caps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) u32 dma_tx_burst = 0, dma_rx_burst = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) unsigned long clk_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) int irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) master = spi_alloc_master(&pdev->dev, sizeof(*priv));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) if (!master)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) platform_set_drvdata(pdev, master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) priv = spi_master_get_devdata(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) priv->master = master;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) priv->is_save_param = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) if (IS_ERR(priv->base)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) ret = PTR_ERR(priv->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) goto out_master_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) priv->base_dma_addr = res->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) priv->clk = devm_clk_get(&pdev->dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) if (IS_ERR(priv->clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) dev_err(&pdev->dev, "failed to get clock\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) ret = PTR_ERR(priv->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) goto out_master_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) ret = clk_prepare_enable(priv->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) goto out_master_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) irq = platform_get_irq(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) if (irq < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) ret = irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) goto out_disable_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) ret = devm_request_irq(&pdev->dev, irq, uniphier_spi_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) 0, "uniphier-spi", priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) dev_err(&pdev->dev, "failed to request IRQ\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) goto out_disable_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) init_completion(&priv->xfer_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) clk_rate = clk_get_rate(priv->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) master->max_speed_hz = DIV_ROUND_UP(clk_rate, SSI_MIN_CLK_DIVIDER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) master->min_speed_hz = DIV_ROUND_UP(clk_rate, SSI_MAX_CLK_DIVIDER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) master->dev.of_node = pdev->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) master->bus_num = pdev->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) master->set_cs = uniphier_spi_set_cs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) master->transfer_one = uniphier_spi_transfer_one;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) master->prepare_transfer_hardware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) = uniphier_spi_prepare_transfer_hardware;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) master->unprepare_transfer_hardware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) = uniphier_spi_unprepare_transfer_hardware;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) master->handle_err = uniphier_spi_handle_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) master->can_dma = uniphier_spi_can_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) master->num_chipselect = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) master->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) master->dma_tx = dma_request_chan(&pdev->dev, "tx");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) if (IS_ERR_OR_NULL(master->dma_tx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) if (PTR_ERR(master->dma_tx) == -EPROBE_DEFER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) ret = -EPROBE_DEFER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) goto out_disable_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) master->dma_tx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) dma_tx_burst = INT_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) ret = dma_get_slave_caps(master->dma_tx, &caps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) dev_err(&pdev->dev, "failed to get TX DMA capacities: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) goto out_release_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) dma_tx_burst = caps.max_burst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) master->dma_rx = dma_request_chan(&pdev->dev, "rx");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) if (IS_ERR_OR_NULL(master->dma_rx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) if (PTR_ERR(master->dma_rx) == -EPROBE_DEFER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) ret = -EPROBE_DEFER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) goto out_release_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) master->dma_rx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) dma_rx_burst = INT_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) ret = dma_get_slave_caps(master->dma_rx, &caps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) dev_err(&pdev->dev, "failed to get RX DMA capacities: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) goto out_release_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) dma_rx_burst = caps.max_burst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) master->max_dma_len = min(dma_tx_burst, dma_rx_burst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) ret = devm_spi_register_master(&pdev->dev, master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) goto out_release_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) out_release_dma:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) if (!IS_ERR_OR_NULL(master->dma_rx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) dma_release_channel(master->dma_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) master->dma_rx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) if (!IS_ERR_OR_NULL(master->dma_tx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) dma_release_channel(master->dma_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) master->dma_tx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) out_disable_clk:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) clk_disable_unprepare(priv->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) out_master_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) spi_master_put(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) static int uniphier_spi_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) struct spi_master *master = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) if (master->dma_tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) dma_release_channel(master->dma_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) if (master->dma_rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) dma_release_channel(master->dma_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) clk_disable_unprepare(priv->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) static const struct of_device_id uniphier_spi_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) { .compatible = "socionext,uniphier-scssi" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) { /* sentinel */ }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) MODULE_DEVICE_TABLE(of, uniphier_spi_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) static struct platform_driver uniphier_spi_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) .probe = uniphier_spi_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) .remove = uniphier_spi_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) .name = "uniphier-spi",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) .of_match_table = uniphier_spi_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) module_platform_driver(uniphier_spi_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) MODULE_AUTHOR("Keiji Hayashibara <hayashibara.keiji@socionext.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) MODULE_DESCRIPTION("Socionext UniPhier SPI controller driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) MODULE_LICENSE("GPL v2");