// SPDX-License-Identifier: GPL-2.0
/*
 * SH RSPI driver
 *
 * Copyright (C) 2012, 2013 Renesas Solutions Corp.
 * Copyright (C) 2014 Glider bvba
 *
 * Based on spi-sh.c:
 * Copyright (C) 2011 Renesas Solutions Corp.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
#include <linux/spi/spi.h>
#include <linux/spi/rspi.h>
#include <linux/spinlock.h>

#define RSPI_SPCR	0x00	/* Control Register */
#define RSPI_SSLP	0x01	/* Slave Select Polarity Register */
#define RSPI_SPPCR	0x02	/* Pin Control Register */
#define RSPI_SPSR	0x03	/* Status Register */
#define RSPI_SPDR	0x04	/* Data Register */
#define RSPI_SPSCR	0x08	/* Sequence Control Register */
#define RSPI_SPSSR	0x09	/* Sequence Status Register */
#define RSPI_SPBR	0x0a	/* Bit Rate Register */
#define RSPI_SPDCR	0x0b	/* Data Control Register */
#define RSPI_SPCKD	0x0c	/* Clock Delay Register */
#define RSPI_SSLND	0x0d	/* Slave Select Negation Delay Register */
#define RSPI_SPND	0x0e	/* Next-Access Delay Register */
#define RSPI_SPCR2	0x0f	/* Control Register 2 (SH only) */
#define RSPI_SPCMD0	0x10	/* Command Register 0 */
#define RSPI_SPCMD1	0x12	/* Command Register 1 */
#define RSPI_SPCMD2	0x14	/* Command Register 2 */
#define RSPI_SPCMD3	0x16	/* Command Register 3 */
#define RSPI_SPCMD4	0x18	/* Command Register 4 */
#define RSPI_SPCMD5	0x1a	/* Command Register 5 */
#define RSPI_SPCMD6	0x1c	/* Command Register 6 */
#define RSPI_SPCMD7	0x1e	/* Command Register 7 */
#define RSPI_SPCMD(i)	(RSPI_SPCMD0 + (i) * 2)
#define RSPI_NUM_SPCMD	8
#define RSPI_RZ_NUM_SPCMD	4
#define QSPI_NUM_SPCMD	4

/* RSPI on RZ only */
#define RSPI_SPBFCR	0x20	/* Buffer Control Register */
#define RSPI_SPBFDR	0x22	/* Buffer Data Count Setting Register */

/* QSPI only */
#define QSPI_SPBFCR	0x18	/* Buffer Control Register */
#define QSPI_SPBDCR	0x1a	/* Buffer Data Count Register */
#define QSPI_SPBMUL0	0x1c	/* Transfer Data Length Multiplier Setting Register 0 */
#define QSPI_SPBMUL1	0x20	/* Transfer Data Length Multiplier Setting Register 1 */
#define QSPI_SPBMUL2	0x24	/* Transfer Data Length Multiplier Setting Register 2 */
#define QSPI_SPBMUL3	0x28	/* Transfer Data Length Multiplier Setting Register 3 */
#define QSPI_SPBMUL(i)	(QSPI_SPBMUL0 + (i) * 4)

/* SPCR - Control Register */
#define SPCR_SPRIE	0x80	/* Receive Interrupt Enable */
#define SPCR_SPE	0x40	/* Function Enable */
#define SPCR_SPTIE	0x20	/* Transmit Interrupt Enable */
#define SPCR_SPEIE	0x10	/* Error Interrupt Enable */
#define SPCR_MSTR	0x08	/* Master/Slave Mode Select */
#define SPCR_MODFEN	0x04	/* Mode Fault Error Detection Enable */
/* RSPI on SH only */
#define SPCR_TXMD	0x02	/* TX Only Mode (vs. Full Duplex) */
#define SPCR_SPMS	0x01	/* 3-wire Mode (vs. 4-wire) */
/* QSPI on R-Car Gen2 only */
#define SPCR_WSWAP	0x02	/* Word Swap of read-data for DMAC */
#define SPCR_BSWAP	0x01	/* Byte Swap of read-data for DMAC */

/* SSLP - Slave Select Polarity Register */
#define SSLP_SSLP(i)	BIT(i)	/* SSLi Signal Polarity Setting */

/* SPPCR - Pin Control Register */
#define SPPCR_MOIFE	0x20	/* MOSI Idle Value Fixing Enable */
#define SPPCR_MOIFV	0x10	/* MOSI Idle Fixed Value */
#define SPPCR_SPOM	0x04
#define SPPCR_SPLP2	0x02	/* Loopback Mode 2 (non-inverting) */
#define SPPCR_SPLP	0x01	/* Loopback Mode (inverting) */

#define SPPCR_IO3FV	0x04	/* Single-/Dual-SPI Mode IO3 Output Fixed Value */
#define SPPCR_IO2FV	0x04	/* Single-/Dual-SPI Mode IO2 Output Fixed Value */

/* SPSR - Status Register */
#define SPSR_SPRF	0x80	/* Receive Buffer Full Flag */
#define SPSR_TEND	0x40	/* Transmit End */
#define SPSR_SPTEF	0x20	/* Transmit Buffer Empty Flag */
#define SPSR_PERF	0x08	/* Parity Error Flag */
#define SPSR_MODF	0x04	/* Mode Fault Error Flag */
#define SPSR_IDLNF	0x02	/* RSPI Idle Flag */
#define SPSR_OVRF	0x01	/* Overrun Error Flag (RSPI only) */

/* SPSCR - Sequence Control Register */
#define SPSCR_SPSLN_MASK	0x07	/* Sequence Length Specification */

/* SPSSR - Sequence Status Register */
#define SPSSR_SPECM_MASK	0x70	/* Command Error Mask */
#define SPSSR_SPCP_MASK		0x07	/* Command Pointer Mask */

/* SPDCR - Data Control Register */
#define SPDCR_TXDMY	0x80	/* Dummy Data Transmission Enable */
#define SPDCR_SPLW1	0x40	/* Access Width Specification (RZ) */
#define SPDCR_SPLW0	0x20	/* Access Width Specification (RZ) */
#define SPDCR_SPLLWORD	(SPDCR_SPLW1 | SPDCR_SPLW0)
#define SPDCR_SPLWORD	SPDCR_SPLW1
#define SPDCR_SPLBYTE	SPDCR_SPLW0
#define SPDCR_SPLW	0x20	/* Access Width Specification (SH) */
#define SPDCR_SPRDTD	0x10	/* Receive Transmit Data Select (SH) */
#define SPDCR_SLSEL1	0x08
#define SPDCR_SLSEL0	0x04
#define SPDCR_SLSEL_MASK	0x0c	/* SSL1 Output Select (SH) */
#define SPDCR_SPFC1	0x02
#define SPDCR_SPFC0	0x01
#define SPDCR_SPFC_MASK	0x03	/* Frame Count Setting (1-4) (SH) */

/* SPCKD - Clock Delay Register */
#define SPCKD_SCKDL_MASK	0x07	/* Clock Delay Setting (1-8) */

/* SSLND - Slave Select Negation Delay Register */
#define SSLND_SLNDL_MASK	0x07	/* SSL Negation Delay Setting (1-8) */

/* SPND - Next-Access Delay Register */
#define SPND_SPNDL_MASK		0x07	/* Next-Access Delay Setting (1-8) */

/* SPCR2 - Control Register 2 */
#define SPCR2_PTE	0x08	/* Parity Self-Test Enable */
#define SPCR2_SPIE	0x04	/* Idle Interrupt Enable */
#define SPCR2_SPOE	0x02	/* Odd Parity Enable (vs. Even) */
#define SPCR2_SPPE	0x01	/* Parity Enable */

/* SPCMDn - Command Registers */
#define SPCMD_SCKDEN	0x8000	/* Clock Delay Setting Enable */
#define SPCMD_SLNDEN	0x4000	/* SSL Negation Delay Setting Enable */
#define SPCMD_SPNDEN	0x2000	/* Next-Access Delay Enable */
#define SPCMD_LSBF	0x1000	/* LSB First */
#define SPCMD_SPB_MASK	0x0f00	/* Data Length Setting */
#define SPCMD_SPB_8_TO_16(bit)	(((bit - 1) << 8) & SPCMD_SPB_MASK)
#define SPCMD_SPB_8BIT	0x0000	/* QSPI only */
#define SPCMD_SPB_16BIT	0x0100
#define SPCMD_SPB_20BIT	0x0000
#define SPCMD_SPB_24BIT	0x0100
#define SPCMD_SPB_32BIT	0x0200
#define SPCMD_SSLKP	0x0080	/* SSL Signal Level Keeping */
#define SPCMD_SPIMOD_MASK	0x0060	/* SPI Operating Mode (QSPI only) */
#define SPCMD_SPIMOD1	0x0040
#define SPCMD_SPIMOD0	0x0020
#define SPCMD_SPIMOD_SINGLE	0
#define SPCMD_SPIMOD_DUAL	SPCMD_SPIMOD0
#define SPCMD_SPIMOD_QUAD	SPCMD_SPIMOD1
#define SPCMD_SPRW	0x0010	/* SPI Read/Write Access (Dual/Quad) */
#define SPCMD_SSLA(i)	((i) << 4)	/* SSL Assert Signal Setting */
#define SPCMD_BRDV_MASK	0x000c	/* Bit Rate Division Setting */
#define SPCMD_BRDV(brdv)	((brdv) << 2)
#define SPCMD_CPOL	0x0002	/* Clock Polarity Setting */
#define SPCMD_CPHA	0x0001	/* Clock Phase Setting */

/* SPBFCR - Buffer Control Register */
#define SPBFCR_TXRST	0x80	/* Transmit Buffer Data Reset */
#define SPBFCR_RXRST	0x40	/* Receive Buffer Data Reset */
#define SPBFCR_TXTRG_MASK	0x30	/* Transmit Buffer Data Triggering Number */
#define SPBFCR_RXTRG_MASK	0x07	/* Receive Buffer Data Triggering Number */
/* QSPI on R-Car Gen2 */
#define SPBFCR_TXTRG_1B		0x00	/* 31 bytes (1 byte available) */
#define SPBFCR_TXTRG_32B	0x30	/* 0 byte (32 bytes available) */
#define SPBFCR_RXTRG_1B		0x00	/* 1 byte (31 bytes available) */
#define SPBFCR_RXTRG_32B	0x07	/* 32 bytes (0 byte available) */

#define QSPI_BUFFER_SIZE	32u

struct rspi_data {
	void __iomem *addr;
	u32 speed_hz;
	struct spi_controller *ctlr;
	struct platform_device *pdev;
	wait_queue_head_t wait;
	spinlock_t lock;	/* Protects RMW-access to RSPI_SSLP */
	struct clk *clk;
	u16 spcmd;
	u8 spsr;
	u8 sppcr;
	int rx_irq, tx_irq;
	const struct spi_ops *ops;

	unsigned dma_callbacked:1;
	unsigned byte_access:1;
};

static void rspi_write8(const struct rspi_data *rspi, u8 data, u16 offset)
{
	iowrite8(data, rspi->addr + offset);
}

static void rspi_write16(const struct rspi_data *rspi, u16 data, u16 offset)
{
	iowrite16(data, rspi->addr + offset);
}

static void rspi_write32(const struct rspi_data *rspi, u32 data, u16 offset)
{
	iowrite32(data, rspi->addr + offset);
}

static u8 rspi_read8(const struct rspi_data *rspi, u16 offset)
{
	return ioread8(rspi->addr + offset);
}

static u16 rspi_read16(const struct rspi_data *rspi, u16 offset)
{
	return ioread16(rspi->addr + offset);
}

static void rspi_write_data(const struct rspi_data *rspi, u16 data)
{
	if (rspi->byte_access)
		rspi_write8(rspi, data, RSPI_SPDR);
	else /* 16 bit */
		rspi_write16(rspi, data, RSPI_SPDR);
}

static u16 rspi_read_data(const struct rspi_data *rspi)
{
	if (rspi->byte_access)
		return rspi_read8(rspi, RSPI_SPDR);
	else /* 16 bit */
		return rspi_read16(rspi, RSPI_SPDR);
}

/* optional functions */
struct spi_ops {
	int (*set_config_register)(struct rspi_data *rspi, int access_size);
	int (*transfer_one)(struct spi_controller *ctlr,
			    struct spi_device *spi, struct spi_transfer *xfer);
	u16 extra_mode_bits;
	u16 min_div;
	u16 max_div;
	u16 flags;
	u16 fifo_size;
	u8 num_hw_ss;
};

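/*
 * Program the bit rate divider. The effective rate follows from the code
 * below as clksrc / (2 * (SPBR + 1) * 2^BRDV); for example, a 48 MHz clock
 * source and a requested 1 MHz yield SPBR = 23 with BRDV = 0, i.e. exactly
 * 1 MHz. BRDV is only raised when SPBR would not fit in eight bits.
 */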
static void rspi_set_rate(struct rspi_data *rspi)
{
	unsigned long clksrc;
	int brdv = 0, spbr;

	clksrc = clk_get_rate(rspi->clk);
	spbr = DIV_ROUND_UP(clksrc, 2 * rspi->speed_hz) - 1;
	while (spbr > 255 && brdv < 3) {
		brdv++;
		spbr = DIV_ROUND_UP(spbr + 1, 2) - 1;
	}

	rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
	rspi->spcmd |= SPCMD_BRDV(brdv);
	rspi->speed_hz = DIV_ROUND_UP(clksrc, (2U << brdv) * (spbr + 1));
}

/*
 * functions for RSPI on legacy SH
 */
static int rspi_set_config_register(struct rspi_data *rspi, int access_size)
{
	/* Sets output mode, MOSI signal, and (optionally) loopback */
	rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);

	/* Sets transfer bit rate */
	rspi_set_rate(rspi);

	/* Disable dummy transmission, set 16-bit word access, 1 frame */
	rspi_write8(rspi, 0, RSPI_SPDCR);
	rspi->byte_access = 0;

	/* Sets RSPCK, SSL, next-access delay value */
	rspi_write8(rspi, 0x00, RSPI_SPCKD);
	rspi_write8(rspi, 0x00, RSPI_SSLND);
	rspi_write8(rspi, 0x00, RSPI_SPND);

	/* Sets parity, interrupt mask */
	rspi_write8(rspi, 0x00, RSPI_SPCR2);

	/* Resets sequencer */
	rspi_write8(rspi, 0, RSPI_SPSCR);
	rspi->spcmd |= SPCMD_SPB_8_TO_16(access_size);
	rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);

	/* Sets RSPI mode */
	rspi_write8(rspi, SPCR_MSTR, RSPI_SPCR);

	return 0;
}

/*
 * functions for RSPI on RZ
 */
static int rspi_rz_set_config_register(struct rspi_data *rspi, int access_size)
{
	/* Sets output mode, MOSI signal, and (optionally) loopback */
	rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);

	/* Sets transfer bit rate */
	rspi_set_rate(rspi);

	/* Disable dummy transmission, set byte access */
	rspi_write8(rspi, SPDCR_SPLBYTE, RSPI_SPDCR);
	rspi->byte_access = 1;

	/* Sets RSPCK, SSL, next-access delay value */
	rspi_write8(rspi, 0x00, RSPI_SPCKD);
	rspi_write8(rspi, 0x00, RSPI_SSLND);
	rspi_write8(rspi, 0x00, RSPI_SPND);

	/* Resets sequencer */
	rspi_write8(rspi, 0, RSPI_SPSCR);
	rspi->spcmd |= SPCMD_SPB_8_TO_16(access_size);
	rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);

	/* Sets RSPI mode */
	rspi_write8(rspi, SPCR_MSTR, RSPI_SPCR);

	return 0;
}

/*
 * functions for QSPI
 */
static int qspi_set_config_register(struct rspi_data *rspi, int access_size)
{
	unsigned long clksrc;
	int brdv = 0, spbr;

	/* Sets output mode, MOSI signal, and (optionally) loopback */
	rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);

	/* Sets transfer bit rate */
	clksrc = clk_get_rate(rspi->clk);
	if (rspi->speed_hz >= clksrc) {
		spbr = 0;
		rspi->speed_hz = clksrc;
	} else {
		spbr = DIV_ROUND_UP(clksrc, 2 * rspi->speed_hz);
		while (spbr > 255 && brdv < 3) {
			brdv++;
			spbr = DIV_ROUND_UP(spbr, 2);
		}
		spbr = clamp(spbr, 0, 255);
		rspi->speed_hz = DIV_ROUND_UP(clksrc, (2U << brdv) * spbr);
	}
	rspi_write8(rspi, spbr, RSPI_SPBR);
	rspi->spcmd |= SPCMD_BRDV(brdv);

	/* Disable dummy transmission, set byte access */
	rspi_write8(rspi, 0, RSPI_SPDCR);
	rspi->byte_access = 1;

	/* Sets RSPCK, SSL, next-access delay value */
	rspi_write8(rspi, 0x00, RSPI_SPCKD);
	rspi_write8(rspi, 0x00, RSPI_SSLND);
	rspi_write8(rspi, 0x00, RSPI_SPND);

	/* Data Length Setting */
	if (access_size == 8)
		rspi->spcmd |= SPCMD_SPB_8BIT;
	else if (access_size == 16)
		rspi->spcmd |= SPCMD_SPB_16BIT;
	else
		rspi->spcmd |= SPCMD_SPB_32BIT;

	rspi->spcmd |= SPCMD_SCKDEN | SPCMD_SLNDEN | SPCMD_SPNDEN;

	/* Resets transfer data length */
	rspi_write32(rspi, 0, QSPI_SPBMUL0);

	/* Resets transmit and receive buffer */
	rspi_write8(rspi, SPBFCR_TXRST | SPBFCR_RXRST, QSPI_SPBFCR);
	/* Sets buffer to allow normal operation */
	rspi_write8(rspi, 0x00, QSPI_SPBFCR);

	/* Resets sequencer */
	rspi_write8(rspi, 0, RSPI_SPSCR);
	rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);

	/* Sets RSPI mode */
	rspi_write8(rspi, SPCR_MSTR, RSPI_SPCR);

	return 0;
}

static void qspi_update(const struct rspi_data *rspi, u8 mask, u8 val, u8 reg)
{
	u8 data;

	data = rspi_read8(rspi, reg);
	data &= ~mask;
	data |= (val & mask);
	rspi_write8(rspi, data, reg);
}

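/*
 * The two helpers below pick the FIFO trigger level for the remaining
 * length: the 32-byte trigger while at least a full buffer is left, the
 * 1-byte trigger for the final partial chunk. Both return the number of
 * bytes to handle in this round (at most QSPI_BUFFER_SIZE).
 */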
static unsigned int qspi_set_send_trigger(struct rspi_data *rspi,
					  unsigned int len)
{
	unsigned int n;

	n = min(len, QSPI_BUFFER_SIZE);

	if (len >= QSPI_BUFFER_SIZE) {
		/* sets triggering number to 32 bytes */
		qspi_update(rspi, SPBFCR_TXTRG_MASK,
			    SPBFCR_TXTRG_32B, QSPI_SPBFCR);
	} else {
		/* sets triggering number to 1 byte */
		qspi_update(rspi, SPBFCR_TXTRG_MASK,
			    SPBFCR_TXTRG_1B, QSPI_SPBFCR);
	}

	return n;
}

static int qspi_set_receive_trigger(struct rspi_data *rspi, unsigned int len)
{
	unsigned int n;

	n = min(len, QSPI_BUFFER_SIZE);

	if (len >= QSPI_BUFFER_SIZE) {
		/* sets triggering number to 32 bytes */
		qspi_update(rspi, SPBFCR_RXTRG_MASK,
			    SPBFCR_RXTRG_32B, QSPI_SPBFCR);
	} else {
		/* sets triggering number to 1 byte */
		qspi_update(rspi, SPBFCR_RXTRG_MASK,
			    SPBFCR_RXTRG_1B, QSPI_SPBFCR);
	}
	return n;
}

static void rspi_enable_irq(const struct rspi_data *rspi, u8 enable)
{
	rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | enable, RSPI_SPCR);
}

static void rspi_disable_irq(const struct rspi_data *rspi, u8 disable)
{
	rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~disable, RSPI_SPCR);
}

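/*
 * Wait until one of the status bits in wait_mask is set. The flag is checked
 * first to avoid sleeping when the FIFO is already ready; otherwise the
 * matching interrupt is enabled and the interrupt handler (further down in
 * this driver) is expected to update rspi->spsr and wake up rspi->wait.
 * Returns 0 on success or -ETIMEDOUT after roughly one second.
 */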
static int rspi_wait_for_interrupt(struct rspi_data *rspi, u8 wait_mask,
				   u8 enable_bit)
{
	int ret;

	rspi->spsr = rspi_read8(rspi, RSPI_SPSR);
	if (rspi->spsr & wait_mask)
		return 0;

	rspi_enable_irq(rspi, enable_bit);
	ret = wait_event_timeout(rspi->wait, rspi->spsr & wait_mask, HZ);
	if (ret == 0 && !(rspi->spsr & wait_mask))
		return -ETIMEDOUT;

	return 0;
}

static inline int rspi_wait_for_tx_empty(struct rspi_data *rspi)
{
	return rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE);
}

static inline int rspi_wait_for_rx_full(struct rspi_data *rspi)
{
	return rspi_wait_for_interrupt(rspi, SPSR_SPRF, SPCR_SPRIE);
}

static int rspi_data_out(struct rspi_data *rspi, u8 data)
{
	int error = rspi_wait_for_tx_empty(rspi);
	if (error < 0) {
		dev_err(&rspi->ctlr->dev, "transmit timeout\n");
		return error;
	}
	rspi_write_data(rspi, data);
	return 0;
}

static int rspi_data_in(struct rspi_data *rspi)
{
	int error;
	u8 data;

	error = rspi_wait_for_rx_full(rspi);
	if (error < 0) {
		dev_err(&rspi->ctlr->dev, "receive timeout\n");
		return error;
	}
	data = rspi_read_data(rspi);
	return data;
}

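/*
 * Full-duplex PIO helper: per iteration, optionally shift out one data unit
 * and/or read one back, blocking on the transmit-empty/receive-full flags.
 * Either tx or rx may be NULL for unidirectional transfers.
 */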
static int rspi_pio_transfer(struct rspi_data *rspi, const u8 *tx, u8 *rx,
			     unsigned int n)
{
	while (n-- > 0) {
		if (tx) {
			int ret = rspi_data_out(rspi, *tx++);
			if (ret < 0)
				return ret;
		}
		if (rx) {
			int ret = rspi_data_in(rspi);
			if (ret < 0)
				return ret;
			*rx++ = ret;
		}
	}

	return 0;
}

static void rspi_dma_complete(void *arg)
{
	struct rspi_data *rspi = arg;

	rspi->dma_callbacked = 1;
	wake_up_interruptible(&rspi->wait);
}

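/*
 * Run a transfer through the DMA engine. The descriptors are prepared and
 * submitted first, so a missing descriptor returns -EAGAIN and lets the
 * caller fall back to PIO.
 */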
static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
			     struct sg_table *rx)
{
	struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL;
	u8 irq_mask = 0;
	unsigned int other_irq = 0;
	dma_cookie_t cookie;
	int ret;

	/* First prepare and submit the DMA request(s), as this may fail */
	if (rx) {
		desc_rx = dmaengine_prep_slave_sg(rspi->ctlr->dma_rx, rx->sgl,
					rx->nents, DMA_DEV_TO_MEM,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc_rx) {
			ret = -EAGAIN;
			goto no_dma_rx;
		}

		desc_rx->callback = rspi_dma_complete;
		desc_rx->callback_param = rspi;
		cookie = dmaengine_submit(desc_rx);
		if (dma_submit_error(cookie)) {
			ret = cookie;
			goto no_dma_rx;
		}

		irq_mask |= SPCR_SPRIE;
	}

	if (tx) {
		desc_tx = dmaengine_prep_slave_sg(rspi->ctlr->dma_tx, tx->sgl,
					tx->nents, DMA_MEM_TO_DEV,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc_tx) {
			ret = -EAGAIN;
			goto no_dma_tx;
		}

		if (rx) {
			/* No callback */
			desc_tx->callback = NULL;
		} else {
			desc_tx->callback = rspi_dma_complete;
			desc_tx->callback_param = rspi;
		}
		cookie = dmaengine_submit(desc_tx);
		if (dma_submit_error(cookie)) {
			ret = cookie;
			goto no_dma_tx;
		}

		irq_mask |= SPCR_SPTIE;
	}

	/*
	 * DMAC needs SPxIE, but if SPxIE is set, the IRQ routine will be
	 * called. So, this driver disables the IRQ while DMA transfer.
	 */
	if (tx)
		disable_irq(other_irq = rspi->tx_irq);
	if (rx && rspi->rx_irq != other_irq)
		disable_irq(rspi->rx_irq);

	rspi_enable_irq(rspi, irq_mask);
	rspi->dma_callbacked = 0;

	/* Now start DMA */
	if (rx)
		dma_async_issue_pending(rspi->ctlr->dma_rx);
	if (tx)
		dma_async_issue_pending(rspi->ctlr->dma_tx);

	ret = wait_event_interruptible_timeout(rspi->wait,
					       rspi->dma_callbacked, HZ);
	if (ret > 0 && rspi->dma_callbacked) {
		ret = 0;
	} else {
		if (!ret) {
			dev_err(&rspi->ctlr->dev, "DMA timeout\n");
			ret = -ETIMEDOUT;
		}
		if (tx)
			dmaengine_terminate_all(rspi->ctlr->dma_tx);
		if (rx)
			dmaengine_terminate_all(rspi->ctlr->dma_rx);
	}

	rspi_disable_irq(rspi, irq_mask);

	if (tx)
		enable_irq(rspi->tx_irq);
	if (rx && rspi->rx_irq != other_irq)
		enable_irq(rspi->rx_irq);

	return ret;

no_dma_tx:
	if (rx)
		dmaengine_terminate_all(rspi->ctlr->dma_rx);
no_dma_rx:
	if (ret == -EAGAIN) {
		dev_warn_once(&rspi->ctlr->dev,
			      "DMA not available, falling back to PIO\n");
	}
	return ret;
}

static void rspi_receive_init(const struct rspi_data *rspi)
{
	u8 spsr;

	spsr = rspi_read8(rspi, RSPI_SPSR);
	if (spsr & SPSR_SPRF)
		rspi_read_data(rspi);	/* dummy read */
	if (spsr & SPSR_OVRF)
		rspi_write8(rspi, rspi_read8(rspi, RSPI_SPSR) & ~SPSR_OVRF,
			    RSPI_SPSR);
}

static void rspi_rz_receive_init(const struct rspi_data *rspi)
{
	rspi_receive_init(rspi);
	rspi_write8(rspi, SPBFCR_TXRST | SPBFCR_RXRST, RSPI_SPBFCR);
	rspi_write8(rspi, 0, RSPI_SPBFCR);
}

static void qspi_receive_init(const struct rspi_data *rspi)
{
	u8 spsr;

	spsr = rspi_read8(rspi, RSPI_SPSR);
	if (spsr & SPSR_SPRF)
		rspi_read_data(rspi);	/* dummy read */
	rspi_write8(rspi, SPBFCR_TXRST | SPBFCR_RXRST, QSPI_SPBFCR);
	rspi_write8(rspi, 0, QSPI_SPBFCR);
}

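/* Use DMA only for transfers larger than the FIFO; smaller ones use PIO */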
static bool __rspi_can_dma(const struct rspi_data *rspi,
			   const struct spi_transfer *xfer)
{
	return xfer->len > rspi->ops->fifo_size;
}

static bool rspi_can_dma(struct spi_controller *ctlr, struct spi_device *spi,
			 struct spi_transfer *xfer)
{
	struct rspi_data *rspi = spi_controller_get_devdata(ctlr);

	return __rspi_can_dma(rspi, xfer);
}

static int rspi_dma_check_then_transfer(struct rspi_data *rspi,
					struct spi_transfer *xfer)
{
	if (!rspi->ctlr->can_dma || !__rspi_can_dma(rspi, xfer))
		return -EAGAIN;

	/* rx_buf can be NULL on RSPI on SH in TX-only Mode */
	return rspi_dma_transfer(rspi, &xfer->tx_sg,
				 xfer->rx_buf ? &xfer->rx_sg : NULL);
}

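/*
 * Common transfer path: try DMA first and fall back to PIO when DMA is
 * unavailable or not worthwhile (-EAGAIN), then wait for the last data to
 * drain out of the transmit buffer.
 */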
static int rspi_common_transfer(struct rspi_data *rspi,
				struct spi_transfer *xfer)
{
	int ret;

	xfer->effective_speed_hz = rspi->speed_hz;

	ret = rspi_dma_check_then_transfer(rspi, xfer);
	if (ret != -EAGAIN)
		return ret;

	ret = rspi_pio_transfer(rspi, xfer->tx_buf, xfer->rx_buf, xfer->len);
	if (ret < 0)
		return ret;

	/* Wait for the last transmission */
	rspi_wait_for_tx_empty(rspi);

	return 0;
}

static int rspi_transfer_one(struct spi_controller *ctlr,
			     struct spi_device *spi, struct spi_transfer *xfer)
{
	struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
	u8 spcr;

	spcr = rspi_read8(rspi, RSPI_SPCR);
	if (xfer->rx_buf) {
		rspi_receive_init(rspi);
		spcr &= ~SPCR_TXMD;
	} else {
		spcr |= SPCR_TXMD;
	}
	rspi_write8(rspi, spcr, RSPI_SPCR);

	return rspi_common_transfer(rspi, xfer);
}

static int rspi_rz_transfer_one(struct spi_controller *ctlr,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	struct rspi_data *rspi = spi_controller_get_devdata(ctlr);

	rspi_rz_receive_init(rspi);

	return rspi_common_transfer(rspi, xfer);
}

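/*
 * Full-duplex QSPI PIO: handle the transfer in FIFO-sized chunks, filling
 * the transmit buffer and then draining the receive buffer once the trigger
 * levels selected for the chunk have been reached.
 */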
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) static int qspi_trigger_transfer_out_in(struct rspi_data *rspi, const u8 *tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) u8 *rx, unsigned int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) unsigned int i, n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) while (len > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) n = qspi_set_send_trigger(rspi, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) qspi_set_receive_trigger(rspi, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) ret = rspi_wait_for_tx_empty(rspi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) dev_err(&rspi->ctlr->dev, "transmit timeout\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) for (i = 0; i < n; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) rspi_write_data(rspi, *tx++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) ret = rspi_wait_for_rx_full(rspi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) dev_err(&rspi->ctlr->dev, "receive timeout\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) for (i = 0; i < n; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) *rx++ = rspi_read_data(rspi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) len -= n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) static int qspi_transfer_out_in(struct rspi_data *rspi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) struct spi_transfer *xfer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) qspi_receive_init(rspi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) ret = rspi_dma_check_then_transfer(rspi, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) if (ret != -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) return qspi_trigger_transfer_out_in(rspi, xfer->tx_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) xfer->rx_buf, xfer->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796)
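/*
 * Transmit-only QSPI transfer: try DMA on the TX scatterlist first and
 * fall back to trigger-based PIO when the DMA path returns -EAGAIN.
 * The trailing rspi_wait_for_tx_empty() lets the transmit buffer drain
 * before the transfer is reported as complete.
 */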
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) const u8 *tx = xfer->tx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) unsigned int n = xfer->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) unsigned int i, len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) if (rspi->ctlr->can_dma && __rspi_can_dma(rspi, xfer)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) ret = rspi_dma_transfer(rspi, &xfer->tx_sg, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) if (ret != -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) while (n > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) len = qspi_set_send_trigger(rspi, n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) ret = rspi_wait_for_tx_empty(rspi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) dev_err(&rspi->ctlr->dev, "transmit timeout\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) for (i = 0; i < len; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) rspi_write_data(rspi, *tx++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) n -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) /* Wait for the last transmission */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) rspi_wait_for_tx_empty(rspi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
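/*
 * Receive-only QSPI transfer: try DMA on the RX scatterlist first and
 * fall back to trigger-based PIO, reading up to the receive trigger
 * level per iteration until xfer->len bytes have been collected.
 */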
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) static int qspi_transfer_in(struct rspi_data *rspi, struct spi_transfer *xfer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) u8 *rx = xfer->rx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) unsigned int n = xfer->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) unsigned int i, len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) if (rspi->ctlr->can_dma && __rspi_can_dma(rspi, xfer)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) ret = rspi_dma_transfer(rspi, NULL, &xfer->rx_sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) if (ret != -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) while (n > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) len = qspi_set_receive_trigger(rspi, n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) ret = rspi_wait_for_rx_full(rspi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) dev_err(&rspi->ctlr->dev, "receive timeout\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) for (i = 0; i < len; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) *rx++ = rspi_read_data(rspi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) n -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) static int qspi_transfer_one(struct spi_controller *ctlr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) struct spi_device *spi, struct spi_transfer *xfer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) xfer->effective_speed_hz = rspi->speed_hz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) if (spi->mode & SPI_LOOP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) return qspi_transfer_out_in(rspi, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) } else if (xfer->tx_nbits > SPI_NBITS_SINGLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) /* Quad or Dual SPI Write */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) return qspi_transfer_out(rspi, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) } else if (xfer->rx_nbits > SPI_NBITS_SINGLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) /* Quad or Dual SPI Read */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) return qspi_transfer_in(rspi, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) /* Single SPI Transfer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) return qspi_transfer_out_in(rspi, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
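/*
 * Map a transfer's tx_nbits/rx_nbits to SPCMD transfer-mode bits:
 * dual/quad writes select SPCMD_SPIMOD_DUAL/QUAD, dual/quad reads also
 * set SPCMD_SPRW. Single-wire transfers map to 0 (standard SPI mode).
 */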
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) static u16 qspi_transfer_mode(const struct spi_transfer *xfer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) if (xfer->tx_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) switch (xfer->tx_nbits) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) case SPI_NBITS_QUAD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) return SPCMD_SPIMOD_QUAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) case SPI_NBITS_DUAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) return SPCMD_SPIMOD_DUAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) if (xfer->rx_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) switch (xfer->rx_nbits) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) case SPI_NBITS_QUAD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) return SPCMD_SPIMOD_QUAD | SPCMD_SPRW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) case SPI_NBITS_DUAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) return SPCMD_SPIMOD_DUAL | SPCMD_SPRW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901)
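/*
 * Program the QSPI sequencer so that consecutive transfers using the
 * same transfer mode share one command slot: every mode change consumes
 * an SPCMD register, the accumulated byte count of the previous group is
 * written to its SPBMUL register, and RSPI_SPSCR is set to the index of
 * the last slot in use. Messages needing more than QSPI_NUM_SPCMD
 * different modes are rejected.
 */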
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) static int qspi_setup_sequencer(struct rspi_data *rspi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) const struct spi_message *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) const struct spi_transfer *xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) unsigned int i = 0, len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) u16 current_mode = 0xffff, mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) list_for_each_entry(xfer, &msg->transfers, transfer_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) mode = qspi_transfer_mode(xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) if (mode == current_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) len += xfer->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) /* Transfer mode change */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) if (i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) /* Set transfer data length of previous transfer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) rspi_write32(rspi, len, QSPI_SPBMUL(i - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) if (i >= QSPI_NUM_SPCMD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) dev_err(&msg->spi->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) "Too many different transfer modes\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) /* Program transfer mode for this transfer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) rspi_write16(rspi, rspi->spcmd | mode, RSPI_SPCMD(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) current_mode = mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) len = xfer->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) if (i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) /* Set final transfer data length and sequence length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) rspi_write32(rspi, len, QSPI_SPBMUL(i - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) rspi_write8(rspi, i - 1, RSPI_SPSCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942)
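/*
 * Per-device setup: program the native slave-select polarity according
 * to SPI_CS_HIGH. Devices using a GPIO chip select return early, as no
 * native SSL line has to be configured for them.
 */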
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) static int rspi_setup(struct spi_device *spi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) struct rspi_data *rspi = spi_controller_get_devdata(spi->controller);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) u8 sslp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) if (spi->cs_gpiod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) pm_runtime_get_sync(&rspi->pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) spin_lock_irq(&rspi->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) sslp = rspi_read8(rspi, RSPI_SSLP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) if (spi->mode & SPI_CS_HIGH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) sslp |= SSLP_SSLP(spi->chip_select);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) sslp &= ~SSLP_SSLP(spi->chip_select);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) rspi_write8(rspi, sslp, RSPI_SSLP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) spin_unlock_irq(&rspi->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) pm_runtime_put(&rspi->pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
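/*
 * Per-message setup: pick the lowest bit rate requested by any transfer,
 * build the common SPCMD/SPPCR settings from the SPI mode flags, set up
 * the sequencer for dual/quad messages, and finally enable the SPI
 * function.
 */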
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) static int rspi_prepare_message(struct spi_controller *ctlr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) struct spi_message *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) struct spi_device *spi = msg->spi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) const struct spi_transfer *xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) * As the Bit Rate Register must not be changed while the device is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) * active, all transfers in a message must use the same bit rate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) * In theory, the sequencer could be enabled, and each Command Register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) * could divide the base bit rate by a different value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) * However, most RSPI variants do not have Transfer Data Length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) * Multiplier Setting Registers, so each sequence step would be limited
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) * to a single word, making this feature unsuitable for large
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) * transfers, which would gain most from it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) rspi->speed_hz = spi->max_speed_hz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) list_for_each_entry(xfer, &msg->transfers, transfer_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) if (xfer->speed_hz < rspi->speed_hz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) rspi->speed_hz = xfer->speed_hz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) rspi->spcmd = SPCMD_SSLKP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) if (spi->mode & SPI_CPOL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) rspi->spcmd |= SPCMD_CPOL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) if (spi->mode & SPI_CPHA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) rspi->spcmd |= SPCMD_CPHA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) if (spi->mode & SPI_LSB_FIRST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) rspi->spcmd |= SPCMD_LSBF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) /* Configure slave signal to assert */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) rspi->spcmd |= SPCMD_SSLA(spi->cs_gpiod ? rspi->ctlr->unused_native_cs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) : spi->chip_select);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) /* CMOS output mode and MOSI signal from previous transfer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) rspi->sppcr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) if (spi->mode & SPI_LOOP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) rspi->sppcr |= SPPCR_SPLP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) rspi->ops->set_config_register(rspi, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) if (msg->spi->mode &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) /* Setup sequencer for messages with multiple transfer modes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) ret = qspi_setup_sequencer(rspi, msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) /* Enable SPI function in master mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_SPE, RSPI_SPCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) static int rspi_unprepare_message(struct spi_controller *ctlr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) struct spi_message *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) /* Disable SPI function */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_SPE, RSPI_SPCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) /* Reset sequencer for Single SPI Transfers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) rspi_write8(rspi, 0, RSPI_SPSCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
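/*
 * Single multiplexed interrupt: latch SPSR, disable whichever of the
 * receive-full / transmit-empty interrupt enables triggered, and wake
 * up the thread sleeping on rspi->wait.
 */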
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) static irqreturn_t rspi_irq_mux(int irq, void *_sr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) struct rspi_data *rspi = _sr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) u8 spsr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) irqreturn_t ret = IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) u8 disable_irq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) rspi->spsr = spsr = rspi_read8(rspi, RSPI_SPSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) if (spsr & SPSR_SPRF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) disable_irq |= SPCR_SPRIE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) if (spsr & SPSR_SPTEF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) disable_irq |= SPCR_SPTIE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) if (disable_irq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) ret = IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) rspi_disable_irq(rspi, disable_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) wake_up(&rspi->wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) static irqreturn_t rspi_irq_rx(int irq, void *_sr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) struct rspi_data *rspi = _sr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) u8 spsr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) rspi->spsr = spsr = rspi_read8(rspi, RSPI_SPSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) if (spsr & SPSR_SPRF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) rspi_disable_irq(rspi, SPCR_SPRIE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) wake_up(&rspi->wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) static irqreturn_t rspi_irq_tx(int irq, void *_sr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) struct rspi_data *rspi = _sr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) u8 spsr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) rspi->spsr = spsr = rspi_read8(rspi, RSPI_SPSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) if (spsr & SPSR_SPTEF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) rspi_disable_irq(rspi, SPCR_SPTIE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) wake_up(&rspi->wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
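/*
 * Request and configure one DMA channel, looked up either via the DT
 * dma-names ("tx"/"rx") or via the legacy shdma filter ID. The slave
 * configuration points the channel at the data register using 1-byte
 * accesses. Returns NULL on failure so the caller can fall back to PIO.
 */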
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) static struct dma_chan *rspi_request_dma_chan(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) enum dma_transfer_direction dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) unsigned int id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) dma_addr_t port_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) dma_cap_mask_t mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) struct dma_chan *chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) struct dma_slave_config cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) dma_cap_zero(mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) dma_cap_set(DMA_SLAVE, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) chan = dma_request_slave_channel_compat(mask, shdma_chan_filter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) (void *)(unsigned long)id, dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) dir == DMA_MEM_TO_DEV ? "tx" : "rx");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) if (!chan) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) dev_warn(dev, "dma_request_slave_channel_compat failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) memset(&cfg, 0, sizeof(cfg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) cfg.direction = dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) if (dir == DMA_MEM_TO_DEV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) cfg.dst_addr = port_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) cfg.src_addr = port_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) ret = dmaengine_slave_config(chan, &cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) dev_warn(dev, "dmaengine_slave_config failed %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) dma_release_channel(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) return chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)
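/*
 * Set up the TX and RX DMA channels for the controller. Channel IDs come
 * from DT (dummy zeroes here, resolved through the DMA bindings) or from
 * platform data; if neither provides them, DMA is quietly skipped and
 * the driver stays on PIO.
 */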
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) static int rspi_request_dma(struct device *dev, struct spi_controller *ctlr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) const struct resource *res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) const struct rspi_plat_data *rspi_pd = dev_get_platdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) unsigned int dma_tx_id, dma_rx_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) if (dev->of_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) /* In the OF case we will get the slave IDs from the DT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) dma_tx_id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) dma_rx_id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) } else if (rspi_pd && rspi_pd->dma_tx_id && rspi_pd->dma_rx_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) dma_tx_id = rspi_pd->dma_tx_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) dma_rx_id = rspi_pd->dma_rx_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) /* No DMA channel IDs available; not an error, just fall back to PIO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) ctlr->dma_tx = rspi_request_dma_chan(dev, DMA_MEM_TO_DEV, dma_tx_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) res->start + RSPI_SPDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) if (!ctlr->dma_tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) ctlr->dma_rx = rspi_request_dma_chan(dev, DMA_DEV_TO_MEM, dma_rx_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) res->start + RSPI_SPDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) if (!ctlr->dma_rx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) dma_release_channel(ctlr->dma_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) ctlr->dma_tx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) ctlr->can_dma = rspi_can_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) dev_info(dev, "DMA available\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) static void rspi_release_dma(struct spi_controller *ctlr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) if (ctlr->dma_tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) dma_release_channel(ctlr->dma_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) if (ctlr->dma_rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) dma_release_channel(ctlr->dma_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) static int rspi_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) struct rspi_data *rspi = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) rspi_release_dma(rspi->ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) pm_runtime_disable(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) static const struct spi_ops rspi_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) .set_config_register = rspi_set_config_register,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) .transfer_one = rspi_transfer_one,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) .min_div = 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) .max_div = 4096,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) .flags = SPI_CONTROLLER_MUST_TX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) .fifo_size = 8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) .num_hw_ss = 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) static const struct spi_ops rspi_rz_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) .set_config_register = rspi_rz_set_config_register,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) .transfer_one = rspi_rz_transfer_one,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) .min_div = 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) .max_div = 4096,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) .flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) .fifo_size = 8, /* 8 for TX, 32 for RX */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) .num_hw_ss = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) static const struct spi_ops qspi_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) .set_config_register = qspi_set_config_register,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) .transfer_one = qspi_transfer_one,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) .extra_mode_bits = SPI_TX_DUAL | SPI_TX_QUAD |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) SPI_RX_DUAL | SPI_RX_QUAD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) .min_div = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) .max_div = 4080,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) .flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) .fifo_size = 32,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) .num_hw_ss = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) #ifdef CONFIG_OF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) static const struct of_device_id rspi_of_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) /* RSPI on legacy SH */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) { .compatible = "renesas,rspi", .data = &rspi_ops },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) /* RSPI on RZ/A1H */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) { .compatible = "renesas,rspi-rz", .data = &rspi_rz_ops },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) /* QSPI on R-Car Gen2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) { .compatible = "renesas,qspi", .data = &qspi_ops },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) { /* sentinel */ }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) MODULE_DEVICE_TABLE(of, rspi_of_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) static int rspi_parse_dt(struct device *dev, struct spi_controller *ctlr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) u32 num_cs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) /* Parse DT properties */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) error = of_property_read_u32(dev->of_node, "num-cs", &num_cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) dev_err(dev, "of_property_read_u32 num-cs failed %d\n", error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) ctlr->num_chipselect = num_cs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) #define rspi_of_match NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) static inline int rspi_parse_dt(struct device *dev, struct spi_controller *ctlr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) #endif /* CONFIG_OF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)
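/*
 * devm_request_irq() wrapper that builds a "<device>:<suffix>" name so
 * the rx/tx/mux interrupts can be told apart in /proc/interrupts.
 */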
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) static int rspi_request_irq(struct device *dev, unsigned int irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) irq_handler_t handler, const char *suffix,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) const char *name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) dev_name(dev), suffix);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) if (!name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) return devm_request_irq(dev, irq, handler, 0, name, dev_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) static int rspi_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) struct spi_controller *ctlr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) struct rspi_data *rspi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) const struct rspi_plat_data *rspi_pd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) const struct spi_ops *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) unsigned long clksrc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) ctlr = spi_alloc_master(&pdev->dev, sizeof(struct rspi_data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) if (ctlr == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) ops = of_device_get_match_data(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) if (ops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) ret = rspi_parse_dt(&pdev->dev, ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) goto error1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) ops = (struct spi_ops *)pdev->id_entry->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) rspi_pd = dev_get_platdata(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) if (rspi_pd && rspi_pd->num_chipselect)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) ctlr->num_chipselect = rspi_pd->num_chipselect;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) ctlr->num_chipselect = 2; /* default */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) rspi = spi_controller_get_devdata(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) platform_set_drvdata(pdev, rspi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) rspi->ops = ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) rspi->ctlr = ctlr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) rspi->addr = devm_ioremap_resource(&pdev->dev, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) if (IS_ERR(rspi->addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) ret = PTR_ERR(rspi->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) goto error1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) rspi->clk = devm_clk_get(&pdev->dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) if (IS_ERR(rspi->clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) dev_err(&pdev->dev, "cannot get clock\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) ret = PTR_ERR(rspi->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) goto error1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) rspi->pdev = pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) pm_runtime_enable(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) init_waitqueue_head(&rspi->wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) spin_lock_init(&rspi->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) ctlr->bus_num = pdev->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) ctlr->setup = rspi_setup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) ctlr->auto_runtime_pm = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) ctlr->transfer_one = ops->transfer_one;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) ctlr->prepare_message = rspi_prepare_message;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) ctlr->unprepare_message = rspi_unprepare_message;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) ctlr->mode_bits = SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | SPI_LSB_FIRST |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) SPI_LOOP | ops->extra_mode_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) clksrc = clk_get_rate(rspi->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) ctlr->min_speed_hz = DIV_ROUND_UP(clksrc, ops->max_div);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) ctlr->max_speed_hz = DIV_ROUND_UP(clksrc, ops->min_div);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) ctlr->flags = ops->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) ctlr->dev.of_node = pdev->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) ctlr->use_gpio_descriptors = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) ctlr->max_native_cs = rspi->ops->num_hw_ss;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) ret = platform_get_irq_byname_optional(pdev, "rx");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) ret = platform_get_irq_byname_optional(pdev, "mux");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) ret = platform_get_irq(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) if (ret >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) rspi->rx_irq = rspi->tx_irq = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) rspi->rx_irq = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) ret = platform_get_irq_byname(pdev, "tx");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) if (ret >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) rspi->tx_irq = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) if (rspi->rx_irq == rspi->tx_irq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) /* Single multiplexed interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) ret = rspi_request_irq(&pdev->dev, rspi->rx_irq, rspi_irq_mux,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) "mux", rspi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) /* Multi-interrupt mode, only SPRI and SPTI are used */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) ret = rspi_request_irq(&pdev->dev, rspi->rx_irq, rspi_irq_rx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) "rx", rspi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) ret = rspi_request_irq(&pdev->dev, rspi->tx_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) rspi_irq_tx, "tx", rspi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) dev_err(&pdev->dev, "request_irq error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) goto error2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) ret = rspi_request_dma(&pdev->dev, ctlr, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) dev_warn(&pdev->dev, "DMA not available, using PIO\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) ret = devm_spi_register_controller(&pdev->dev, ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) dev_err(&pdev->dev, "devm_spi_register_controller error.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) goto error3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) dev_info(&pdev->dev, "probed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) error3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) rspi_release_dma(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) error2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) pm_runtime_disable(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) error1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) spi_controller_put(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) static const struct platform_device_id spi_driver_ids[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) { "rspi", (kernel_ulong_t)&rspi_ops },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) {},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) MODULE_DEVICE_TABLE(platform, spi_driver_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) #ifdef CONFIG_PM_SLEEP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) static int rspi_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) struct rspi_data *rspi = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) return spi_controller_suspend(rspi->ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) static int rspi_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) struct rspi_data *rspi = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) return spi_controller_resume(rspi->ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) static SIMPLE_DEV_PM_OPS(rspi_pm_ops, rspi_suspend, rspi_resume);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) #define DEV_PM_OPS &rspi_pm_ops
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) #define DEV_PM_OPS NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) #endif /* CONFIG_PM_SLEEP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) static struct platform_driver rspi_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) .probe = rspi_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) .remove = rspi_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) .id_table = spi_driver_ids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) .name = "renesas_spi",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) .pm = DEV_PM_OPS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) .of_match_table = of_match_ptr(rspi_of_match),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) module_platform_driver(rspi_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) MODULE_DESCRIPTION("Renesas RSPI bus driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) MODULE_LICENSE("GPL v2");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) MODULE_AUTHOR("Yoshihiro Shimoda");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) MODULE_ALIAS("platform:rspi");